summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.coveragerc3
-rw-r--r--.dockerignore16
-rw-r--r--.gitattributes1
-rw-r--r--.github/workflows/build.yaml72
-rw-r--r--.github/workflows/docs.yaml40
-rw-r--r--.gitignore21
-rw-r--r--AUTHORS264
-rw-r--r--CHANGES2134
-rw-r--r--Contributing.md167
-rw-r--r--LICENSE25
-rw-r--r--MANIFEST.in5
-rw-r--r--Makefile68
-rw-r--r--README.rst93
-rw-r--r--description.rst18
-rw-r--r--doc/Makefile163
-rw-r--r--doc/_static/demo-worker.js74
-rw-r--r--doc/_static/demo.css89
-rw-r--r--doc/_static/demo.js200
-rw-r--r--doc/_static/favicon.icobin0 -> 16958 bytes
-rw-r--r--doc/_static/github.pngbin0 -> 1127 bytes
-rw-r--r--doc/_static/logo_new.pngbin0 -> 40944 bytes
-rw-r--r--doc/_static/logo_only.pngbin0 -> 16424 bytes
-rw-r--r--doc/_static/spinner.gifbin0 -> 10771 bytes
-rw-r--r--doc/_templates/demo.html97
-rw-r--r--doc/_templates/demo_sidebar.html1
-rw-r--r--doc/_templates/docssidebar.html3
-rw-r--r--doc/_templates/index_with_try.html0
-rw-r--r--doc/_templates/indexsidebar.html18
-rw-r--r--doc/_templates/styles.html55
-rw-r--r--doc/_themes/pygments14/layout.html101
-rw-r--r--doc/_themes/pygments14/localtoc.html17
-rw-r--r--doc/_themes/pygments14/relations.html25
-rw-r--r--doc/_themes/pygments14/static/bodybg.pngbin0 -> 51903 bytes
-rw-r--r--doc/_themes/pygments14/static/docbg.pngbin0 -> 61296 bytes
-rw-r--r--doc/_themes/pygments14/static/listitem.pngbin0 -> 207 bytes
-rw-r--r--doc/_themes/pygments14/static/logo.pngbin0 -> 26933 bytes
-rw-r--r--doc/_themes/pygments14/static/pocoo.pngbin0 -> 2154 bytes
-rw-r--r--doc/_themes/pygments14/static/pygments14.css_t422
-rw-r--r--doc/_themes/pygments14/theme.conf17
-rw-r--r--doc/conf.py291
-rw-r--r--doc/docs/api.rst360
-rw-r--r--doc/docs/authors.rst4
-rw-r--r--doc/docs/changelog.rst1
-rw-r--r--doc/docs/cmdline.rst218
-rw-r--r--doc/docs/filterdevelopment.rst75
-rw-r--r--doc/docs/filters.rst48
-rw-r--r--doc/docs/formatterdevelopment.rst169
-rw-r--r--doc/docs/formatters.rst48
-rw-r--r--doc/docs/index.rst64
-rw-r--r--doc/docs/integrate.rst40
-rw-r--r--doc/docs/java.rst70
-rw-r--r--doc/docs/lexerdevelopment.rst748
-rw-r--r--doc/docs/lexers.rst69
-rw-r--r--doc/docs/moinmoin.rst39
-rw-r--r--doc/docs/plugins.rst122
-rw-r--r--doc/docs/quickstart.rst205
-rw-r--r--doc/docs/rstdirective.rst22
-rw-r--r--doc/docs/security.rst31
-rw-r--r--doc/docs/styledevelopment.rst96
-rw-r--r--doc/docs/styles.rst157
-rw-r--r--doc/docs/terminal-sessions.rst46
-rw-r--r--doc/docs/tokens.rst376
-rw-r--r--doc/docs/unicode.rst58
-rw-r--r--doc/download.rst39
-rw-r--r--doc/examples/example.py14
-rw-r--r--doc/faq.rst142
-rw-r--r--doc/index.rst47
-rw-r--r--doc/languages.rst18
-rw-r--r--doc/make.bat190
-rw-r--r--doc/pygmentize.1112
-rw-r--r--doc/pyodide/Dockerfile20
-rw-r--r--doc/pyodide/meta.yaml8
-rw-r--r--doc/styles.rst5
-rwxr-xr-xexternal/autopygmentize145
-rwxr-xr-xexternal/lasso-builtins-generator-9.lasso162
-rw-r--r--external/lilypond-builtins-generator.ly391
-rw-r--r--external/markdown-processor.py66
-rw-r--r--external/moin-parser.py111
-rw-r--r--external/pygments.bashcomp38
-rw-r--r--external/rst-directive.py81
-rw-r--r--external/scheme-builtins-generator.scm116
-rw-r--r--pygments/__init__.py82
-rw-r--r--pygments/__main__.py17
-rw-r--r--pygments/cmdline.py668
-rw-r--r--pygments/console.py70
-rw-r--r--pygments/filter.py71
-rw-r--r--pygments/filters/__init__.py940
-rw-r--r--pygments/formatter.py94
-rw-r--r--pygments/formatters/__init__.py142
-rwxr-xr-xpygments/formatters/_mapping.py23
-rw-r--r--pygments/formatters/bbcode.py108
-rw-r--r--pygments/formatters/groff.py170
-rw-r--r--pygments/formatters/html.py991
-rw-r--r--pygments/formatters/img.py645
-rw-r--r--pygments/formatters/irc.py154
-rw-r--r--pygments/formatters/latex.py521
-rw-r--r--pygments/formatters/other.py161
-rw-r--r--pygments/formatters/pangomarkup.py83
-rw-r--r--pygments/formatters/rtf.py146
-rw-r--r--pygments/formatters/svg.py188
-rw-r--r--pygments/formatters/terminal.py127
-rw-r--r--pygments/formatters/terminal256.py338
-rw-r--r--pygments/lexer.py883
-rw-r--r--pygments/lexers/__init__.py334
-rw-r--r--pygments/lexers/_ada_builtins.py103
-rw-r--r--pygments/lexers/_asy_builtins.py1644
-rw-r--r--pygments/lexers/_cl_builtins.py231
-rw-r--r--pygments/lexers/_cocoa_builtins.py75
-rw-r--r--pygments/lexers/_csound_builtins.py1780
-rw-r--r--pygments/lexers/_css_builtins.py558
-rw-r--r--pygments/lexers/_julia_builtins.py411
-rw-r--r--pygments/lexers/_lasso_builtins.py5326
-rw-r--r--pygments/lexers/_lilypond_builtins.py4886
-rw-r--r--pygments/lexers/_lua_builtins.py285
-rw-r--r--pygments/lexers/_mapping.py553
-rw-r--r--pygments/lexers/_mql_builtins.py1171
-rw-r--r--pygments/lexers/_mysql_builtins.py1335
-rw-r--r--pygments/lexers/_openedge_builtins.py2600
-rw-r--r--pygments/lexers/_php_builtins.py3325
-rw-r--r--pygments/lexers/_postgres_builtins.py684
-rw-r--r--pygments/lexers/_qlik_builtins.py666
-rw-r--r--pygments/lexers/_scheme_builtins.py1609
-rw-r--r--pygments/lexers/_scilab_builtins.py3093
-rw-r--r--pygments/lexers/_sourcemod_builtins.py1151
-rw-r--r--pygments/lexers/_stan_builtins.py648
-rw-r--r--pygments/lexers/_stata_builtins.py457
-rw-r--r--pygments/lexers/_tsql_builtins.py1003
-rw-r--r--pygments/lexers/_usd_builtins.py112
-rw-r--r--pygments/lexers/_vbscript_builtins.py279
-rw-r--r--pygments/lexers/_vim_builtins.py1938
-rw-r--r--pygments/lexers/actionscript.py245
-rw-r--r--pygments/lexers/ada.py144
-rw-r--r--pygments/lexers/agile.py23
-rw-r--r--pygments/lexers/algebra.py302
-rw-r--r--pygments/lexers/ambient.py76
-rw-r--r--pygments/lexers/amdgpu.py53
-rw-r--r--pygments/lexers/ampl.py88
-rw-r--r--pygments/lexers/apdlexer.py447
-rw-r--r--pygments/lexers/apl.py104
-rw-r--r--pygments/lexers/archetype.py319
-rw-r--r--pygments/lexers/arrow.py117
-rw-r--r--pygments/lexers/arturo.py250
-rw-r--r--pygments/lexers/asc.py55
-rw-r--r--pygments/lexers/asm.py1037
-rw-r--r--pygments/lexers/automation.py381
-rw-r--r--pygments/lexers/bare.py102
-rw-r--r--pygments/lexers/basic.py665
-rw-r--r--pygments/lexers/bdd.py58
-rw-r--r--pygments/lexers/berry.py99
-rw-r--r--pygments/lexers/bibtex.py159
-rw-r--r--pygments/lexers/boa.py97
-rw-r--r--pygments/lexers/business.py626
-rw-r--r--pygments/lexers/c_cpp.py409
-rw-r--r--pygments/lexers/c_like.py666
-rw-r--r--pygments/lexers/capnproto.py75
-rw-r--r--pygments/lexers/cddl.py173
-rw-r--r--pygments/lexers/chapel.py136
-rw-r--r--pygments/lexers/clean.py179
-rw-r--r--pygments/lexers/comal.py80
-rw-r--r--pygments/lexers/compiled.py34
-rw-r--r--pygments/lexers/configs.py1174
-rw-r--r--pygments/lexers/console.py114
-rw-r--r--pygments/lexers/cplint.py44
-rw-r--r--pygments/lexers/crystal.py365
-rw-r--r--pygments/lexers/csound.py468
-rw-r--r--pygments/lexers/css.py602
-rw-r--r--pygments/lexers/d.py258
-rw-r--r--pygments/lexers/dalvik.py127
-rw-r--r--pygments/lexers/data.py767
-rw-r--r--pygments/lexers/devicetree.py109
-rw-r--r--pygments/lexers/diff.py165
-rw-r--r--pygments/lexers/dotnet.py729
-rw-r--r--pygments/lexers/dsls.py981
-rw-r--r--pygments/lexers/dylan.py287
-rw-r--r--pygments/lexers/ecl.py145
-rw-r--r--pygments/lexers/eiffel.py69
-rw-r--r--pygments/lexers/elm.py124
-rw-r--r--pygments/lexers/elpi.py165
-rw-r--r--pygments/lexers/email.py132
-rw-r--r--pygments/lexers/erlang.py528
-rw-r--r--pygments/lexers/esoteric.py301
-rw-r--r--pygments/lexers/ezhil.py77
-rw-r--r--pygments/lexers/factor.py364
-rw-r--r--pygments/lexers/fantom.py251
-rw-r--r--pygments/lexers/felix.py276
-rw-r--r--pygments/lexers/fift.py67
-rw-r--r--pygments/lexers/floscript.py82
-rw-r--r--pygments/lexers/forth.py179
-rw-r--r--pygments/lexers/fortran.py213
-rw-r--r--pygments/lexers/foxpro.py427
-rw-r--r--pygments/lexers/freefem.py894
-rw-r--r--pygments/lexers/func.py108
-rw-r--r--pygments/lexers/functional.py20
-rw-r--r--pygments/lexers/futhark.py106
-rw-r--r--pygments/lexers/gcodelexer.py35
-rw-r--r--pygments/lexers/gdscript.py188
-rw-r--r--pygments/lexers/go.py98
-rw-r--r--pygments/lexers/grammar_notation.py265
-rw-r--r--pygments/lexers/graph.py105
-rw-r--r--pygments/lexers/graphics.py797
-rw-r--r--pygments/lexers/graphviz.py59
-rwxr-xr-xpygments/lexers/gsql.py104
-rw-r--r--pygments/lexers/haskell.py871
-rw-r--r--pygments/lexers/haxe.py937
-rw-r--r--pygments/lexers/hdl.py465
-rw-r--r--pygments/lexers/hexdump.py102
-rw-r--r--pygments/lexers/html.py605
-rw-r--r--pygments/lexers/idl.py285
-rw-r--r--pygments/lexers/igor.py420
-rw-r--r--pygments/lexers/inferno.py96
-rw-r--r--pygments/lexers/installers.py327
-rw-r--r--pygments/lexers/int_fiction.py1382
-rw-r--r--pygments/lexers/iolang.py62
-rw-r--r--pygments/lexers/j.py152
-rw-r--r--pygments/lexers/javascript.py1588
-rw-r--r--pygments/lexers/jmespath.py68
-rw-r--r--pygments/lexers/jslt.py95
-rw-r--r--pygments/lexers/jsonnet.py168
-rw-r--r--pygments/lexers/julia.py294
-rw-r--r--pygments/lexers/jvm.py1820
-rw-r--r--pygments/lexers/kuin.py333
-rw-r--r--pygments/lexers/lilypond.py226
-rw-r--r--pygments/lexers/lisp.py2838
-rw-r--r--pygments/lexers/macaulay2.py1739
-rw-r--r--pygments/lexers/make.py209
-rw-r--r--pygments/lexers/markup.py765
-rw-r--r--pygments/lexers/math.py20
-rw-r--r--pygments/lexers/matlab.py3308
-rw-r--r--pygments/lexers/maxima.py85
-rw-r--r--pygments/lexers/meson.py140
-rw-r--r--pygments/lexers/mime.py210
-rw-r--r--pygments/lexers/minecraft.py394
-rw-r--r--pygments/lexers/mips.py128
-rw-r--r--pygments/lexers/ml.py960
-rw-r--r--pygments/lexers/modeling.py369
-rw-r--r--pygments/lexers/modula2.py1580
-rw-r--r--pygments/lexers/monte.py204
-rw-r--r--pygments/lexers/mosel.py447
-rw-r--r--pygments/lexers/ncl.py893
-rw-r--r--pygments/lexers/nimrod.py200
-rw-r--r--pygments/lexers/nit.py64
-rw-r--r--pygments/lexers/nix.py135
-rw-r--r--pygments/lexers/oberon.py120
-rw-r--r--pygments/lexers/objective.py505
-rw-r--r--pygments/lexers/ooc.py85
-rw-r--r--pygments/lexers/other.py40
-rw-r--r--pygments/lexers/parasail.py79
-rw-r--r--pygments/lexers/parsers.py801
-rw-r--r--pygments/lexers/pascal.py641
-rw-r--r--pygments/lexers/pawn.py202
-rw-r--r--pygments/lexers/perl.py733
-rw-r--r--pygments/lexers/phix.py364
-rw-r--r--pygments/lexers/php.py319
-rw-r--r--pygments/lexers/pointless.py71
-rw-r--r--pygments/lexers/pony.py93
-rw-r--r--pygments/lexers/praat.py304
-rw-r--r--pygments/lexers/procfile.py42
-rw-r--r--pygments/lexers/prolog.py304
-rw-r--r--pygments/lexers/promql.py175
-rw-r--r--pygments/lexers/python.py1204
-rw-r--r--pygments/lexers/q.py188
-rw-r--r--pygments/lexers/qlik.py117
-rw-r--r--pygments/lexers/qvt.py151
-rw-r--r--pygments/lexers/r.py190
-rw-r--r--pygments/lexers/rdf.py462
-rw-r--r--pygments/lexers/rebol.py430
-rw-r--r--pygments/lexers/resource.py84
-rw-r--r--pygments/lexers/ride.py139
-rw-r--r--pygments/lexers/rita.py43
-rw-r--r--pygments/lexers/rnc.py67
-rw-r--r--pygments/lexers/roboconf.py81
-rw-r--r--pygments/lexers/robotframework.py552
-rw-r--r--pygments/lexers/ruby.py523
-rw-r--r--pygments/lexers/rust.py223
-rw-r--r--pygments/lexers/sas.py227
-rw-r--r--pygments/lexers/savi.py170
-rw-r--r--pygments/lexers/scdoc.py79
-rw-r--r--pygments/lexers/scripting.py1286
-rw-r--r--pygments/lexers/sgf.py60
-rw-r--r--pygments/lexers/shell.py918
-rw-r--r--pygments/lexers/sieve.py78
-rw-r--r--pygments/lexers/slash.py184
-rw-r--r--pygments/lexers/smalltalk.py196
-rw-r--r--pygments/lexers/smithy.py78
-rw-r--r--pygments/lexers/smv.py78
-rw-r--r--pygments/lexers/snobol.py82
-rw-r--r--pygments/lexers/solidity.py87
-rw-r--r--pygments/lexers/sophia.py103
-rw-r--r--pygments/lexers/special.py116
-rw-r--r--pygments/lexers/spice.py71
-rw-r--r--pygments/lexers/sql.py838
-rw-r--r--pygments/lexers/srcinfo.py62
-rw-r--r--pygments/lexers/stata.py171
-rw-r--r--pygments/lexers/supercollider.py95
-rw-r--r--pygments/lexers/tal.py74
-rw-r--r--pygments/lexers/tcl.py149
-rw-r--r--pygments/lexers/teal.py89
-rw-r--r--pygments/lexers/templates.py2300
-rw-r--r--pygments/lexers/teraterm.py326
-rw-r--r--pygments/lexers/testing.py210
-rw-r--r--pygments/lexers/text.py26
-rw-r--r--pygments/lexers/textedit.py202
-rw-r--r--pygments/lexers/textfmts.py431
-rw-r--r--pygments/lexers/theorem.py484
-rw-r--r--pygments/lexers/thingsdb.py116
-rw-r--r--pygments/lexers/tlb.py57
-rw-r--r--pygments/lexers/tnt.py271
-rw-r--r--pygments/lexers/trafficscript.py51
-rw-r--r--pygments/lexers/typoscript.py217
-rw-r--r--pygments/lexers/ul4.py267
-rw-r--r--pygments/lexers/unicon.py411
-rw-r--r--pygments/lexers/urbi.py145
-rw-r--r--pygments/lexers/usd.py90
-rw-r--r--pygments/lexers/varnish.py189
-rw-r--r--pygments/lexers/verification.py114
-rw-r--r--pygments/lexers/web.py23
-rw-r--r--pygments/lexers/webassembly.py120
-rw-r--r--pygments/lexers/webidl.py299
-rw-r--r--pygments/lexers/webmisc.py1010
-rw-r--r--pygments/lexers/whiley.py116
-rw-r--r--pygments/lexers/wowtoc.py120
-rw-r--r--pygments/lexers/wren.py99
-rw-r--r--pygments/lexers/x10.py67
-rw-r--r--pygments/lexers/xorg.py37
-rw-r--r--pygments/lexers/yang.py104
-rw-r--r--pygments/lexers/zig.py124
-rw-r--r--pygments/modeline.py43
-rw-r--r--pygments/plugin.py88
-rw-r--r--pygments/regexopt.py91
-rw-r--r--pygments/scanner.py104
-rw-r--r--pygments/sphinxext.py217
-rw-r--r--pygments/style.py197
-rw-r--r--pygments/styles/__init__.py97
-rw-r--r--pygments/styles/abap.py28
-rw-r--r--pygments/styles/algol.py61
-rw-r--r--pygments/styles/algol_nu.py61
-rw-r--r--pygments/styles/arduino.py96
-rw-r--r--pygments/styles/autumn.py62
-rw-r--r--pygments/styles/borland.py48
-rw-r--r--pygments/styles/bw.py47
-rw-r--r--pygments/styles/colorful.py78
-rw-r--r--pygments/styles/default.py71
-rw-r--r--pygments/styles/dracula.py102
-rw-r--r--pygments/styles/emacs.py70
-rw-r--r--pygments/styles/friendly.py71
-rw-r--r--pygments/styles/friendly_grayscale.py75
-rw-r--r--pygments/styles/fruity.py41
-rw-r--r--pygments/styles/gh_dark.py107
-rw-r--r--pygments/styles/gruvbox.py109
-rw-r--r--pygments/styles/igor.py27
-rw-r--r--pygments/styles/inkpot.py67
-rw-r--r--pygments/styles/lilypond.py56
-rw-r--r--pygments/styles/lovelace.py94
-rw-r--r--pygments/styles/manni.py74
-rw-r--r--pygments/styles/material.py117
-rw-r--r--pygments/styles/monokai.py106
-rw-r--r--pygments/styles/murphy.py77
-rw-r--r--pygments/styles/native.py65
-rw-r--r--pygments/styles/nord.py150
-rw-r--r--pygments/styles/onedark.py59
-rw-r--r--pygments/styles/paraiso_dark.py119
-rw-r--r--pygments/styles/paraiso_light.py119
-rw-r--r--pygments/styles/pastie.py72
-rw-r--r--pygments/styles/perldoc.py67
-rw-r--r--pygments/styles/rainbow_dash.py88
-rw-r--r--pygments/styles/rrt.py33
-rw-r--r--pygments/styles/sas.py41
-rw-r--r--pygments/styles/solarized.py136
-rw-r--r--pygments/styles/staroffice.py26
-rw-r--r--pygments/styles/stata_dark.py38
-rw-r--r--pygments/styles/stata_light.py37
-rw-r--r--pygments/styles/tango.py139
-rw-r--r--pygments/styles/trac.py60
-rw-r--r--pygments/styles/vim.py61
-rw-r--r--pygments/styles/vs.py36
-rw-r--r--pygments/styles/xcode.py48
-rw-r--r--pygments/styles/zenburn.py78
-rw-r--r--pygments/token.py213
-rw-r--r--pygments/unistring.py153
-rw-r--r--pygments/util.py308
-rw-r--r--pyproject.toml6
-rw-r--r--requirements.txt7
-rw-r--r--scripts/check_crlf.py32
-rwxr-xr-xscripts/check_repeated_token.py77
-rwxr-xr-xscripts/check_sources.py201
-rw-r--r--scripts/check_whitespace_token.py52
-rwxr-xr-xscripts/count_token_references.py270
-rwxr-xr-xscripts/debug_lexer.py306
-rw-r--r--scripts/detect_missing_analyse_text.py48
-rw-r--r--scripts/gen_mapfiles.py53
-rw-r--r--scripts/get_css_properties.py33
-rw-r--r--scripts/get_vimkw.py72
-rw-r--r--scripts/pylintrc301
-rw-r--r--scripts/release-checklist24
-rwxr-xr-xscripts/update_contrasts.py21
-rw-r--r--scripts/utility.py69
-rwxr-xr-xscripts/vim2pygments.py932
-rw-r--r--setup.cfg54
-rwxr-xr-xsetup.py4
-rw-r--r--tests/__init__.py7
-rw-r--r--tests/conftest.py127
-rw-r--r--tests/contrast/min_contrasts.json50
-rw-r--r--tests/contrast/test_contrasts.py101
-rw-r--r--tests/dtds/HTML4-f.dtd37
-rw-r--r--tests/dtds/HTML4-s.dtd869
-rw-r--r--tests/dtds/HTML4.dcl88
-rw-r--r--tests/dtds/HTML4.dtd1092
-rw-r--r--tests/dtds/HTML4.soc9
-rw-r--r--tests/dtds/HTMLlat1.ent195
-rw-r--r--tests/dtds/HTMLspec.ent77
-rw-r--r--tests/dtds/HTMLsym.ent241
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_filename.html4
-rw-r--r--tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_nofilename.html4
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_nofilename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_filename.html6
-rw-r--r--tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_nofilename.html6
-rw-r--r--tests/snippets/apacheconf/test_directive_no_args.txt12
-rw-r--r--tests/snippets/apacheconf/test_fix_lock_absolute_path.txt8
-rw-r--r--tests/snippets/apacheconf/test_include_globs.txt8
-rw-r--r--tests/snippets/apacheconf/test_malformed_scoped_directive_closing_tag.txt19
-rw-r--r--tests/snippets/apacheconf/test_multi_include_globs.txt8
-rw-r--r--tests/snippets/apacheconf/test_multi_include_globs_root.txt8
-rw-r--r--tests/snippets/apacheconf/test_multiline_argument.txt20
-rw-r--r--tests/snippets/apacheconf/test_multiline_comment.txt12
-rw-r--r--tests/snippets/apacheconf/test_normal_scoped_directive.txt14
-rw-r--r--tests/snippets/apl/test_leading_underscore.txt26
-rw-r--r--tests/snippets/asm/test_cpuid.txt9
-rw-r--r--tests/snippets/bibtex/test_basic_bst.txt54
-rw-r--r--tests/snippets/bibtex/test_comment.txt7
-rw-r--r--tests/snippets/bibtex/test_entry.txt63
-rw-r--r--tests/snippets/bibtex/test_mismatched_brace.txt10
-rw-r--r--tests/snippets/bibtex/test_missing_body.txt10
-rw-r--r--tests/snippets/bibtex/test_preamble.txt11
-rw-r--r--tests/snippets/bibtex/test_string.txt15
-rw-r--r--tests/snippets/c/test_comment_end.txt31
-rw-r--r--tests/snippets/c/test_function_comments.txt409
-rw-r--r--tests/snippets/c/test_label.txt31
-rw-r--r--tests/snippets/c/test_label_followed_by_statement.txt35
-rw-r--r--tests/snippets/c/test_label_space_before_colon.txt32
-rw-r--r--tests/snippets/c/test_numbers.txt20
-rw-r--r--tests/snippets/c/test_preproc_file.txt17
-rw-r--r--tests/snippets/c/test_preproc_file2.txt17
-rw-r--r--tests/snippets/c/test_preproc_file3.txt18
-rw-r--r--tests/snippets/c/test_preproc_file4.txt13
-rw-r--r--tests/snippets/c/test_preproc_file5.txt19
-rw-r--r--tests/snippets/c/test_string_resembling_decl_end.txt41
-rw-r--r--tests/snippets/c/test_switch.txt56
-rw-r--r--tests/snippets/c/test_switch_space_before_colon.txt58
-rw-r--r--tests/snippets/cfm/test_basic_comment.txt8
-rw-r--r--tests/snippets/cfm/test_nested_comment.txt12
-rw-r--r--tests/snippets/coffeescript/test_beware_infinite_loop.txt14
-rw-r--r--tests/snippets/coffeescript/test_mixed_slashes.txt13
-rw-r--r--tests/snippets/conftest.py32
-rw-r--r--tests/snippets/console/fake_ps2_prompt.txt14
-rw-r--r--tests/snippets/console/prompt_in_output.txt21
-rw-r--r--tests/snippets/console/ps2_prompt.txt15
-rw-r--r--tests/snippets/console/test_comment_after_prompt.txt6
-rw-r--r--tests/snippets/console/test_newline_in_echo_no_ps2.txt16
-rw-r--r--tests/snippets/console/test_newline_in_echo_ps2.txt16
-rw-r--r--tests/snippets/console/test_newline_in_ls_no_ps2.txt16
-rw-r--r--tests/snippets/console/test_newline_in_ls_ps2.txt16
-rw-r--r--tests/snippets/console/test_virtualenv.txt11
-rw-r--r--tests/snippets/coq/test_unicode.txt15
-rw-r--r--tests/snippets/cpp/test_good_comment.txt6
-rw-r--r--tests/snippets/cpp/test_open_comment.txt5
-rw-r--r--tests/snippets/cpp/test_unicode_identifiers.txt146
-rw-r--r--tests/snippets/crystal/test_annotation.txt16
-rw-r--r--tests/snippets/crystal/test_array_access.txt11
-rw-r--r--tests/snippets/crystal/test_chars.txt25
-rw-r--r--tests/snippets/crystal/test_constant_and_module.txt14
-rw-r--r--tests/snippets/crystal/test_escaped_bracestring.txt19
-rw-r--r--tests/snippets/crystal/test_escaped_interpolation.txt9
-rw-r--r--tests/snippets/crystal/test_interpolation_nested_curly.txt56
-rw-r--r--tests/snippets/crystal/test_lib.txt58
-rw-r--r--tests/snippets/crystal/test_macro.txt76
-rw-r--r--tests/snippets/crystal/test_operator_methods.txt18
-rw-r--r--tests/snippets/crystal/test_percent_strings.txt41
-rw-r--r--tests/snippets/crystal/test_percent_strings_special.txt31
-rw-r--r--tests/snippets/crystal/test_pseudo_builtins.txt20
-rw-r--r--tests/snippets/crystal/test_pseudo_keywords.txt50
-rw-r--r--tests/snippets/crystal/test_range_syntax1.txt8
-rw-r--r--tests/snippets/crystal/test_range_syntax2.txt10
-rw-r--r--tests/snippets/csound/test_braced_strings.txt11
-rw-r--r--tests/snippets/csound/test_comments.txt16
-rw-r--r--tests/snippets/csound/test_escape_sequences.txt122
-rw-r--r--tests/snippets/csound/test_function_like_macro_definitions.txt44
-rw-r--r--tests/snippets/csound/test_function_like_macros.txt40
-rw-r--r--tests/snippets/csound/test_global_value_identifiers.txt30
-rw-r--r--tests/snippets/csound/test_goto_statements.txt176
-rw-r--r--tests/snippets/csound/test_include_directives.txt14
-rw-r--r--tests/snippets/csound/test_includestr_directives.txt11
-rw-r--r--tests/snippets/csound/test_instrument_blocks.txt42
-rw-r--r--tests/snippets/csound/test_keywords.txt62
-rw-r--r--tests/snippets/csound/test_labels.txt13
-rw-r--r--tests/snippets/csound/test_macro_preprocessor_directives.txt20
-rw-r--r--tests/snippets/csound/test_name.txt9
-rw-r--r--tests/snippets/csound/test_numbers.txt52
-rw-r--r--tests/snippets/csound/test_object_like_macro_definitions.txt30
-rw-r--r--tests/snippets/csound/test_operators.txt114
-rw-r--r--tests/snippets/csound/test_other_preprocessor_directives.txt26
-rw-r--r--tests/snippets/csound/test_printks_and_prints_escape_sequences.txt290
-rw-r--r--tests/snippets/csound/test_quoted_strings.txt9
-rw-r--r--tests/snippets/csound/test_user_defined_opcodes.txt24
-rw-r--r--tests/snippets/doscon/test_gt_only.txt11
-rw-r--r--tests/snippets/elpi/test_catastrophic_backtracking.txt6
-rw-r--r--tests/snippets/elpi/test_chr.txt54
-rw-r--r--tests/snippets/elpi/test_clause.txt67
-rw-r--r--tests/snippets/elpi/test_namespace.txt35
-rw-r--r--tests/snippets/elpi/test_pred.txt60
-rw-r--r--tests/snippets/elpi/test_type.txt112
-rw-r--r--tests/snippets/ezhil/test_function.txt100
-rw-r--r--tests/snippets/ezhil/test_gcd_expr.txt21
-rw-r--r--tests/snippets/ezhil/test_if_statement.txt28
-rw-r--r--tests/snippets/ezhil/test_sum.txt8
-rw-r--r--tests/snippets/fortran/test_string_cataback.txt112
-rw-r--r--tests/snippets/gas/test_comments.txt29
-rw-r--r--tests/snippets/gdscript/test_comment.txt6
-rw-r--r--tests/snippets/gdscript/test_export_array.txt17
-rw-r--r--tests/snippets/gdscript/test_function_with_types.txt33
-rw-r--r--tests/snippets/gdscript/test_inner_class.txt20
-rw-r--r--tests/snippets/gdscript/test_multiline_string.txt8
-rw-r--r--tests/snippets/gdscript/test_signal.txt15
-rw-r--r--tests/snippets/gdscript/test_simple_function.txt22
-rw-r--r--tests/snippets/gdscript/test_variable_declaration_and_assigment.txt12
-rw-r--r--tests/snippets/haskell/test_promoted_names.txt10
-rw-r--r--tests/snippets/html/multiline-comment-catastrophic-backtracking.txt34
-rw-r--r--tests/snippets/http/test_application_calendar_xml.txt28
-rw-r--r--tests/snippets/http/test_application_xml.txt28
-rw-r--r--tests/snippets/http/test_http_status_line.txt12
-rw-r--r--tests/snippets/http/test_http_status_line_without_reason_phrase.txt10
-rw-r--r--tests/snippets/http/test_http_status_line_without_reason_phrase_rfc_7230.txt11
-rw-r--r--tests/snippets/idris/test_compiler_directive.txt20
-rw-r--r--tests/snippets/idris/test_reserved_word.txt29
-rw-r--r--tests/snippets/ini/test_indented_entries_1.txt16
-rw-r--r--tests/snippets/ini/test_indented_entries_2.txt20
-rw-r--r--tests/snippets/ini/test_indented_entries_3.txt20
-rw-r--r--tests/snippets/j/test_deal_operator.txt8
-rw-r--r--tests/snippets/j/test_deal_operator_fixed_seed.txt9
-rw-r--r--tests/snippets/java/test_default.txt36
-rw-r--r--tests/snippets/java/test_enhanced_for.txt22
-rw-r--r--tests/snippets/java/test_multiline_string.txt185
-rw-r--r--tests/snippets/java/test_multiline_string_only.txt46
-rw-r--r--tests/snippets/java/test_numeric_literals.txt34
-rw-r--r--tests/snippets/java/test_record.txt67
-rw-r--r--tests/snippets/js/super.txt72
-rw-r--r--tests/snippets/jslt/test_sample.txt83
-rw-r--r--tests/snippets/json/test_basic.txt30
-rw-r--r--tests/snippets/json/test_basic_bare.txt23
-rw-r--r--tests/snippets/julia-repl/test_repl.txt51
-rw-r--r--tests/snippets/julia/test_keywords.txt101
-rw-r--r--tests/snippets/julia/test_macros.txt56
-rw-r--r--tests/snippets/julia/test_names.txt148
-rw-r--r--tests/snippets/julia/test_numbers.txt261
-rw-r--r--tests/snippets/julia/test_operators.txt172
-rw-r--r--tests/snippets/julia/test_strings.txt225
-rw-r--r--tests/snippets/julia/test_symbols.txt78
-rw-r--r--tests/snippets/julia/test_types.txt196
-rw-r--r--tests/snippets/julia/test_unicode.txt37
-rw-r--r--tests/snippets/kotlin/test_can_cope_generics_in_destructuring.txt27
-rw-r--r--tests/snippets/kotlin/test_can_cope_with_backtick_names_in_functions.txt8
-rw-r--r--tests/snippets/kotlin/test_can_cope_with_commas_and_dashes_in_backtick_Names.txt8
-rw-r--r--tests/snippets/kotlin/test_can_cope_with_destructuring.txt16
-rw-r--r--tests/snippets/kotlin/test_can_cope_with_generics.txt34
-rw-r--r--tests/snippets/kotlin/test_modifier_keyword.txt18
-rw-r--r--tests/snippets/kotlin/test_should_cope_with_multiline_comments.txt12
-rw-r--r--tests/snippets/kotlin/test_string_interpolation.txt35
-rw-r--r--tests/snippets/less/test_single_line_comments.txt21
-rw-r--r--tests/snippets/mason/test_handles_tags_correctly.txt69
-rw-r--r--tests/snippets/matlab/test_classes_with_properties.txt105
-rw-r--r--tests/snippets/matlab/test_command_mode.txt12
-rw-r--r--tests/snippets/matlab/test_comment_after_continuation.txt25
-rw-r--r--tests/snippets/matlab/test_dot_operator.txt10
-rw-r--r--tests/snippets/matlab/test_keywords_ended_by_newline.txt36
-rw-r--r--tests/snippets/matlab/test_line_continuation.txt25
-rw-r--r--tests/snippets/matlab/test_multiple_spaces_variable_assignment.txt13
-rw-r--r--tests/snippets/matlab/test_one_space_assignment.txt13
-rw-r--r--tests/snippets/matlab/test_operator_multiple_space.txt13
-rw-r--r--tests/snippets/matlab/test_single_line.txt18
-rw-r--r--tests/snippets/matlabsession/test_wrong_continuation.txt18
-rw-r--r--tests/snippets/mcfunction/commenting.txt173
-rw-r--r--tests/snippets/mcfunction/coordinates.txt188
-rw-r--r--tests/snippets/mcfunction/data.txt120
-rw-r--r--tests/snippets/mcfunction/difficult_1.txt56
-rw-r--r--tests/snippets/mcfunction/multiline.txt108
-rw-r--r--tests/snippets/mcfunction/selectors.txt73
-rw-r--r--tests/snippets/mcfunction/simple.txt92
-rw-r--r--tests/snippets/md/test_bold_fenced_by_asterisk.txt15
-rw-r--r--tests/snippets/md/test_bold_fenced_by_underscore.txt15
-rw-r--r--tests/snippets/md/test_bulleted_list_1.txt14
-rw-r--r--tests/snippets/md/test_bulleted_list_2.txt14
-rw-r--r--tests/snippets/md/test_bulleted_list_3.txt14
-rw-r--r--tests/snippets/md/test_bulleted_list_4.txt19
-rw-r--r--tests/snippets/md/test_code_block_fenced_by_backticks.txt15
-rw-r--r--tests/snippets/md/test_code_block_with_language.txt16
-rw-r--r--tests/snippets/md/test_escape_italics.txt23
-rw-r--r--tests/snippets/md/test_inline_code.txt36
-rw-r--r--tests/snippets/md/test_inline_code_after_block.txt19
-rw-r--r--tests/snippets/md/test_inline_code_in_list.txt26
-rw-r--r--tests/snippets/md/test_invalid_bold.txt31
-rw-r--r--tests/snippets/md/test_invalid_italics.txt31
-rw-r--r--tests/snippets/md/test_italics_and_bold.txt21
-rw-r--r--tests/snippets/md/test_italics_fenced_by_asterisk.txt15
-rw-r--r--tests/snippets/md/test_italics_fenced_by_underscore.txt15
-rw-r--r--tests/snippets/md/test_italics_no_multiline.txt10
-rw-r--r--tests/snippets/md/test_links.txt23
-rw-r--r--tests/snippets/md/test_mentions.txt10
-rw-r--r--tests/snippets/md/test_numbered_list.txt14
-rw-r--r--tests/snippets/md/test_quote.txt10
-rw-r--r--tests/snippets/md/test_reference_style_links.txt18
-rw-r--r--tests/snippets/md/test_strikethrough.txt9
-rw-r--r--tests/snippets/md/test_task_list.txt34
-rw-r--r--tests/snippets/md/test_topics.txt10
-rw-r--r--tests/snippets/mips/deprecated_substrings.txt34
-rw-r--r--tests/snippets/mips/keyword_substrings.txt254
-rw-r--r--tests/snippets/mips/variable_substrings.txt102
-rw-r--r--tests/snippets/nasm/checkid.txt32
-rw-r--r--tests/snippets/objectivec/test_literal_number_bool.txt7
-rw-r--r--tests/snippets/objectivec/test_literal_number_bool_expression.txt9
-rw-r--r--tests/snippets/objectivec/test_literal_number_expression.txt11
-rw-r--r--tests/snippets/objectivec/test_literal_number_int.txt9
-rw-r--r--tests/snippets/objectivec/test_literal_number_nested_expression.txt15
-rw-r--r--tests/snippets/objectivec/test_module_import.txt9
-rw-r--r--tests/snippets/octave/test_multilinecomment.txt27
-rw-r--r--tests/snippets/omg-idl/annotation_named_params.txt27
-rw-r--r--tests/snippets/omg-idl/enumerators.txt18
-rw-r--r--tests/snippets/peg/test_basic.txt17
-rw-r--r--tests/snippets/peg/test_modified_strings.txt21
-rw-r--r--tests/snippets/peg/test_operators.txt29
-rw-r--r--tests/snippets/php/test_backslashes_in_strings.txt28
-rw-r--r--tests/snippets/php/test_string_escaping_run.txt16
-rw-r--r--tests/snippets/powershell/test_colon_punctuation.txt35
-rw-r--r--tests/snippets/powershell/test_remoting_session.txt19
-rw-r--r--tests/snippets/powershell/test_session.txt28
-rw-r--r--tests/snippets/praat/test_broken_unquoted_string.txt16
-rw-r--r--tests/snippets/praat/test_function_call.txt20
-rw-r--r--tests/snippets/praat/test_inline_if.txt27
-rw-r--r--tests/snippets/praat/test_interpolated_indexed_numeric_with_precision.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_local_numeric_with_precision.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_numeric_hash.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_numeric_indexed.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_numeric_with_precision.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_string_hash.txt6
-rw-r--r--tests/snippets/praat/test_interpolated_string_indexed.txt6
-rw-r--r--tests/snippets/praat/test_interpolation_boundary.txt14
-rw-r--r--tests/snippets/praat/test_numeric_assignment.txt11
-rw-r--r--tests/snippets/praat/test_string_assignment.txt12
-rw-r--r--tests/snippets/praat/test_string_escaped_quotes.txt13
-rw-r--r--tests/snippets/promql/test_complex_exp_single_quotes.txt35
-rw-r--r--tests/snippets/promql/test_expression_and_comment.txt15
-rw-r--r--tests/snippets/promql/test_function_delta.txt19
-rw-r--r--tests/snippets/promql/test_function_multi_line.txt80
-rw-r--r--tests/snippets/promql/test_function_multi_line_with_offset.txt87
-rw-r--r--tests/snippets/promql/test_function_sum_with_args.txt19
-rw-r--r--tests/snippets/promql/test_matching_operator_no_regex_match.txt16
-rw-r--r--tests/snippets/promql/test_metric.txt6
-rw-r--r--tests/snippets/promql/test_metric_multiple_labels.txt19
-rw-r--r--tests/snippets/promql/test_metric_multiple_labels_with_spaces.txt22
-rw-r--r--tests/snippets/promql/test_metric_one_label.txt13
-rw-r--r--tests/snippets/properties/test_comments.txt12
-rw-r--r--tests/snippets/properties/test_escaped_space_in_key.txt10
-rw-r--r--tests/snippets/properties/test_escaped_space_in_value.txt10
-rw-r--r--tests/snippets/properties/test_just_key.txt6
-rw-r--r--tests/snippets/properties/test_just_key_with_space.txt6
-rw-r--r--tests/snippets/properties/test_leading_whitespace_comments.txt6
-rw-r--r--tests/snippets/properties/test_space_delimited_kv_pair.txt8
-rw-r--r--tests/snippets/pwsh-session/test_continuation.txt124
-rw-r--r--tests/snippets/python/test_bytes_escape_codes.txt24
-rw-r--r--tests/snippets/python/test_floats.txt75
-rw-r--r--tests/snippets/python/test_fstring_01a.txt25
-rw-r--r--tests/snippets/python/test_fstring_01b.txt25
-rw-r--r--tests/snippets/python/test_fstring_02a.txt13
-rw-r--r--tests/snippets/python/test_fstring_02b.txt13
-rw-r--r--tests/snippets/python/test_fstring_03a.txt14
-rw-r--r--tests/snippets/python/test_fstring_03b.txt14
-rw-r--r--tests/snippets/python/test_fstring_04a.txt13
-rw-r--r--tests/snippets/python/test_fstring_04b.txt13
-rw-r--r--tests/snippets/python/test_fstring_05a.txt16
-rw-r--r--tests/snippets/python/test_fstring_05b.txt16
-rw-r--r--tests/snippets/python/test_fstring_06a.txt16
-rw-r--r--tests/snippets/python/test_fstring_06b.txt16
-rw-r--r--tests/snippets/python/test_fstring_07a.txt17
-rw-r--r--tests/snippets/python/test_fstring_07b.txt17
-rw-r--r--tests/snippets/python/test_fstring_08a.txt15
-rw-r--r--tests/snippets/python/test_fstring_08b.txt15
-rw-r--r--tests/snippets/python/test_fstring_09a.txt14
-rw-r--r--tests/snippets/python/test_fstring_09b.txt14
-rw-r--r--tests/snippets/python/test_fstring_10a.txt18
-rw-r--r--tests/snippets/python/test_fstring_10b.txt18
-rw-r--r--tests/snippets/python/test_fstring_11a.txt18
-rw-r--r--tests/snippets/python/test_fstring_11b.txt18
-rw-r--r--tests/snippets/python/test_fstring_12a.txt16
-rw-r--r--tests/snippets/python/test_fstring_12b.txt16
-rw-r--r--tests/snippets/python/test_fstring_13a.txt17
-rw-r--r--tests/snippets/python/test_fstring_13b.txt17
-rw-r--r--tests/snippets/python/test_fstring_14a.txt20
-rw-r--r--tests/snippets/python/test_fstring_14b.txt20
-rw-r--r--tests/snippets/python/test_fstring_15a.txt42
-rw-r--r--tests/snippets/python/test_fstring_15b.txt42
-rw-r--r--tests/snippets/python/test_fstring_16a.txt18
-rw-r--r--tests/snippets/python/test_fstring_16b.txt18
-rw-r--r--tests/snippets/python/test_fstring_17a.txt14
-rw-r--r--tests/snippets/python/test_fstring_17b.txt14
-rw-r--r--tests/snippets/python/test_fstring_18a.txt25
-rw-r--r--tests/snippets/python/test_fstring_18b.txt25
-rw-r--r--tests/snippets/python/test_fstring_19a.txt46
-rw-r--r--tests/snippets/python/test_fstring_19b.txt46
-rw-r--r--tests/snippets/python/test_fstring_20a.txt17
-rw-r--r--tests/snippets/python/test_fstring_20b.txt17
-rw-r--r--tests/snippets/python/test_fstring_21a.txt15
-rw-r--r--tests/snippets/python/test_fstring_21b.txt15
-rw-r--r--tests/snippets/python/test_fstring_22a.txt14
-rw-r--r--tests/snippets/python/test_fstring_22b.txt14
-rw-r--r--tests/snippets/python/test_fstring_23a.txt11
-rw-r--r--tests/snippets/python/test_fstring_23b.txt11
-rw-r--r--tests/snippets/python/test_fstring_24a.txt23
-rw-r--r--tests/snippets/python/test_fstring_24b.txt23
-rw-r--r--tests/snippets/python/test_fstring_25a.txt24
-rw-r--r--tests/snippets/python/test_fstring_25b.txt24
-rw-r--r--tests/snippets/python/test_fstring_26a.txt20
-rw-r--r--tests/snippets/python/test_fstring_26b.txt20
-rw-r--r--tests/snippets/python/test_fstring_27a.txt11
-rw-r--r--tests/snippets/python/test_fstring_27b.txt11
-rw-r--r--tests/snippets/python/test_fstring_28a.txt11
-rw-r--r--tests/snippets/python/test_fstring_28b.txt11
-rw-r--r--tests/snippets/python/test_fstring_29a.txt15
-rw-r--r--tests/snippets/python/test_fstring_29b.txt15
-rw-r--r--tests/snippets/python/test_fstring_30a.txt16
-rw-r--r--tests/snippets/python/test_fstring_30b.txt16
-rw-r--r--tests/snippets/python/test_fstring_31a.txt15
-rw-r--r--tests/snippets/python/test_fstring_31b.txt15
-rw-r--r--tests/snippets/python/test_fstring_32a.txt15
-rw-r--r--tests/snippets/python/test_fstring_32b.txt15
-rw-r--r--tests/snippets/python/test_fstring_33a.txt15
-rw-r--r--tests/snippets/python/test_fstring_33b.txt15
-rw-r--r--tests/snippets/python/test_fstring_34a.txt20
-rw-r--r--tests/snippets/python/test_fstring_34b.txt20
-rw-r--r--tests/snippets/python/test_fstring_35a.txt15
-rw-r--r--tests/snippets/python/test_fstring_35b.txt15
-rw-r--r--tests/snippets/python/test_fstring_36a.txt16
-rw-r--r--tests/snippets/python/test_fstring_36b.txt16
-rw-r--r--tests/snippets/python/test_needs_name.txt55
-rw-r--r--tests/snippets/python/test_pep_515.txt28
-rw-r--r--tests/snippets/python/test_raw_fstring.txt46
-rw-r--r--tests/snippets/python/test_string_escape_codes.txt20
-rw-r--r--tests/snippets/python/test_walrus_operator.txt21
-rw-r--r--tests/snippets/python2/test_cls_builtin.txt34
-rw-r--r--tests/snippets/qbasic/test_keywords_with_dollar.txt22
-rw-r--r--tests/snippets/r/test_call.txt12
-rw-r--r--tests/snippets/r/test_custom_operator.txt10
-rw-r--r--tests/snippets/r/test_dot_indexing.txt9
-rw-r--r--tests/snippets/r/test_dot_name.txt10
-rw-r--r--tests/snippets/r/test_indexing.txt9
-rw-r--r--tests/snippets/r/test_name1.txt6
-rw-r--r--tests/snippets/r/test_name2.txt8
-rw-r--r--tests/snippets/r/test_name3.txt8
-rw-r--r--tests/snippets/ruby/test_escaped_bracestring.txt19
-rw-r--r--tests/snippets/ruby/test_interpolation_nested_curly.txt56
-rw-r--r--tests/snippets/ruby/test_operator_methods.txt9
-rw-r--r--tests/snippets/ruby/test_range_syntax1.txt8
-rw-r--r--tests/snippets/ruby/test_range_syntax2.txt8
-rw-r--r--tests/snippets/ruby/test_range_syntax3.txt10
-rw-r--r--tests/snippets/rust/test_attribute.txt12
-rw-r--r--tests/snippets/rust/test_break.txt39
-rw-r--r--tests/snippets/rust/test_rawstrings.txt117
-rw-r--r--tests/snippets/scala/test_colon_colon_function_name.txt33
-rw-r--r--tests/snippets/scala/test_default_parameter.txt37
-rw-r--r--tests/snippets/scala/test_end_val.txt8
-rw-r--r--tests/snippets/scala/test_end_valx.txt8
-rw-r--r--tests/snippets/scala/test_float_with_exponents.txt12
-rw-r--r--tests/snippets/scala/test_function_operator_name.txt18
-rw-r--r--tests/snippets/scala/test_import_path.txt12
-rw-r--r--tests/snippets/scala/test_invalid_symbol_and_invalid_char.txt8
-rw-r--r--tests/snippets/scala/test_open_soft_keyword.txt12
-rw-r--r--tests/snippets/scala/test_package_name.txt11
-rw-r--r--tests/snippets/scala/test_prepend_operator.txt10
-rw-r--r--tests/snippets/scala/test_qualified_name.txt10
-rw-r--r--tests/snippets/scala/test_qualified_name_class.txt10
-rw-r--r--tests/snippets/scala/test_script_header.txt6
-rw-r--r--tests/snippets/scala/test_symbol_followed_by_op.txt7
-rw-r--r--tests/snippets/scala/test_symbol_name_ending_with_star.txt6
-rw-r--r--tests/snippets/scala/test_underscore_name.txt12
-rw-r--r--tests/snippets/scheme/keywords.txt43
-rw-r--r--tests/snippets/scheme/numbers.txt169
-rw-r--r--tests/snippets/scheme/strings.txt85
-rw-r--r--tests/snippets/shell/test_array_nums.txt14
-rw-r--r--tests/snippets/shell/test_curly_no_escape_and_quotes.txt15
-rw-r--r--tests/snippets/shell/test_curly_with_escape.txt13
-rw-r--r--tests/snippets/shell/test_end_of_line_nums.txt15
-rw-r--r--tests/snippets/shell/test_parsed_single.txt8
-rw-r--r--tests/snippets/shell/test_short_variable_names.txt26
-rw-r--r--tests/snippets/shexc/test_prefixed_name_starting_with_number.txt8
-rw-r--r--tests/snippets/smarty/test_nested_curly.txt18
-rw-r--r--tests/snippets/snbt/json.txt43
-rw-r--r--tests/snippets/snbt/literals.txt41
-rw-r--r--tests/snippets/snbt/multiline.txt56
-rw-r--r--tests/snippets/snbt/nesting.txt39
-rw-r--r--tests/snippets/snbt/quoted_keys.txt29
-rw-r--r--tests/snippets/systemverilog/test_basic.txt157
-rw-r--r--tests/snippets/systemverilog/test_classes.txt89
-rw-r--r--tests/snippets/systemverilog/test_numbers.txt158
-rw-r--r--tests/snippets/systemverilog/test_operators.txt213
-rw-r--r--tests/snippets/tcl/test_comma_and_at.txt131
-rw-r--r--tests/snippets/tcl/test_vars.txt17
-rw-r--r--tests/snippets/teal/test_comments.txt28
-rw-r--r--tests/snippets/teal/test_literals.txt28
-rw-r--r--tests/snippets/teal/test_strings.txt15
-rw-r--r--tests/snippets/terraform/test_attributes.txt155
-rw-r--r--tests/snippets/terraform/test_backend.txt44
-rw-r--r--tests/snippets/terraform/test_comment.txt64
-rw-r--r--tests/snippets/terraform/test_functions.txt56
-rw-r--r--tests/snippets/terraform/test_heredoc.txt65
-rw-r--r--tests/snippets/terraform/test_module.txt32
-rw-r--r--tests/snippets/terraform/test_resource.txt211
-rw-r--r--tests/snippets/terraform/test_types.txt94
-rw-r--r--tests/snippets/terraform/test_variable_declaration.txt41
-rw-r--r--tests/snippets/terraform/test_variable_read.txt23
-rw-r--r--tests/snippets/turtle/test_prefixed_name_starting_with_number.txt8
-rw-r--r--tests/snippets/typescript/test_function_definition.txt18
-rw-r--r--tests/snippets/unixconfig/etc_group.txt45
-rw-r--r--tests/snippets/unixconfig/etc_passwd.txt86
-rw-r--r--tests/snippets/unixconfig/etc_shadow.txt74
-rw-r--r--tests/snippets/usd/test_attribute.txt174
-rw-r--r--tests/snippets/usd/test_composition_arcs.txt101
-rw-r--r--tests/snippets/usd/test_metadata.txt36
-rw-r--r--tests/snippets/usd/test_numbers.txt21
-rw-r--r--tests/snippets/usd/test_outer_match_at_sign.txt14
-rw-r--r--tests/snippets/usd/test_outer_match_double.txt12
-rw-r--r--tests/snippets/usd/test_outer_match_single.txt12
-rw-r--r--tests/snippets/usd/test_string_multiple_line.txt20
-rw-r--r--tests/snippets/usd/test_string_priority.txt10
-rw-r--r--tests/snippets/usd/test_string_single_line.txt6
-rw-r--r--tests/snippets/vbscript/test_floats.txt34
-rw-r--r--tests/snippets/vbscript/test_floats_multiple.txt7
-rw-r--r--tests/snippets/vbscript/test_integers.txt14
-rw-r--r--tests/snippets/vbscript/test_invalid_character.txt10
-rw-r--r--tests/snippets/vbscript/test_names.txt18
-rw-r--r--tests/snippets/vbscript/test_reject_almost_float.txt7
-rw-r--r--tests/snippets/vbscript/test_unterminated_string.txt7
-rw-r--r--tests/snippets/wat/test_align_and_offset_accept_hexadecimal_numbers.txt14
-rw-r--r--tests/snippets/wat/test_comment_with_open_paren.txt10
-rw-r--r--tests/snippets/wat/test_comment_with_semicolon.txt10
-rw-r--r--tests/snippets/wat/test_i32_const_is_builtin.txt6
-rw-r--r--tests/snippets/wat/test_multiline_comment.txt11
-rw-r--r--tests/snippets/wat/test_nested_comment.txt14
-rw-r--r--tests/snippets/wat/test_string_byte_escape.txt9
-rw-r--r--tests/snippets/wat/test_string_with_escape.txt9
-rw-r--r--tests/snippets/wat/test_variable_name_pattern.txt6
-rw-r--r--tests/snippets/whiley/test_whiley_operator.txt10
-rw-r--r--tests/snippets/wren/lonely-paren.txt10
-rw-r--r--tests/snippets/xml/multiline-comment-catastrophic-backtracking.txt56
-rw-r--r--tests/snippets/yaml/test_yaml.txt13
-rw-r--r--tests/snippets/yaml/test_yaml_colon_in_key.txt11
-rw-r--r--tests/snippets/yaml/test_yaml_colon_in_key_double.txt11
-rw-r--r--tests/snippets/yaml/test_yaml_colon_in_key_start.txt11
-rw-r--r--tests/snippets/yang/test_float_value.txt11
-rw-r--r--tests/snippets/yang/test_integer_value.txt11
-rw-r--r--tests/snippets/yang/test_namespace_1.txt11
-rw-r--r--tests/snippets/yang/test_namespace_2.txt13
-rw-r--r--tests/snippets/yang/test_revision_date.txt11
-rw-r--r--tests/snippets/yang/test_string_value.txt11
-rw-r--r--tests/support/empty.py0
-rw-r--r--tests/support/html_formatter.py5
-rw-r--r--tests/support/python_lexer.py11
-rw-r--r--tests/support/structural_diff.py37
-rw-r--r--tests/support/tags36
-rw-r--r--tests/test_basic_api.py351
-rw-r--r--tests/test_cmdline.py324
-rw-r--r--tests/test_coffeescript.py52
-rw-r--r--tests/test_crystal.py80
-rw-r--r--tests/test_data.py285
-rw-r--r--tests/test_devicetree_lexer.py32
-rw-r--r--tests/test_func.py44
-rw-r--r--tests/test_groff_formatter.py40
-rw-r--r--tests/test_guess.py184
-rw-r--r--tests/test_html_formatter.py271
-rw-r--r--tests/test_html_formatter_linenos_elements.py63
-rw-r--r--tests/test_html_lexer.py131
-rw-r--r--tests/test_inherit.py101
-rw-r--r--tests/test_irc_formatter.py30
-rw-r--r--tests/test_java.py40
-rw-r--r--tests/test_javascript.py84
-rw-r--r--tests/test_latex_formatter.py107
-rw-r--r--tests/test_markdown_lexer.py178
-rw-r--r--tests/test_modeline.py20
-rw-r--r--tests/test_mysql.py273
-rw-r--r--tests/test_pangomarkup_formatter.py44
-rw-r--r--tests/test_perllexer.py190
-rw-r--r--tests/test_procfile.py40
-rw-r--r--tests/test_raw_token.py68
-rw-r--r--tests/test_regexlexer.py65
-rw-r--r--tests/test_regexopt.py102
-rw-r--r--tests/test_robotframework_lexer.py38
-rw-r--r--tests/test_rtf_formatter.py107
-rw-r--r--tests/test_ruby.py54
-rw-r--r--tests/test_sql.py115
-rw-r--r--tests/test_templates.py130
-rw-r--r--tests/test_terminal_formatter.py100
-rw-r--r--tests/test_thingsdb.py36
-rw-r--r--tests/test_tnt.py226
-rw-r--r--tests/test_token.py51
-rw-r--r--tests/test_unistring.py45
-rwxr-xr-xtests/test_usd.py64
-rw-r--r--tests/test_using_api.py39
-rw-r--r--tests/test_util.py189
-rw-r--r--tests/test_words.py366
-rw-r--r--tox.ini15
1029 files changed, 149790 insertions, 0 deletions
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..a6c6adb
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,3 @@
+[run]
+include =
+ pygments/*
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..e08d9e1
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,16 @@
+doc
+tests
+Pygments.egg-info
+TAGS
+build
+dist
+htmlcov
+venv
+**/__pycache__
+.*
+*.rst
+*.egg
+*.pyo
+.*.sw[op]
+
+!/doc/pyodide/meta.yaml
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..44a295f
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+tests/examplefiles/*/*.output linguist-generated
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
new file mode 100644
index 0000000..2d26dfb
--- /dev/null
+++ b/.github/workflows/build.yaml
@@ -0,0 +1,72 @@
+name: Pygments
+
+on: [push, pull_request]
+
+permissions:
+ contents: read # to fetch code (actions/checkout)
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest]
+ python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]
+ exclude:
+ - os: ubuntu-latest
+ python-version: "3.6"
+ include:
+ - os: ubuntu-20.04
+ python-version: "3.6"
+ max-parallel: 4
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+ - name: Install package
+ run: |
+ pip install -r requirements.txt
+ pip install .
+ - name: Test package
+ run: pytest -W error
+
+ check:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ - name: Run make check
+ run: make check
+ - name: Fail if the basic checks failed
+ run: make check
+ if: runner.os == 'Linux'
+
+ check-mapfiles:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ - name: Regenerate mapfiles
+ run: make mapfiles
+ - name: Fail if mapfiles changed
+ run: |
+ if git ls-files -m | grep mapping; then
+ echo 'Please run "make mapfiles" and add the changes to a commit.'
+ exit 1
+ fi
+
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - name: Check out regexlint
+ run: git clone https://github.com/pygments/regexlint
+ - name: Run regexlint
+ run: make regexlint REGEXLINT=`pwd`/regexlint
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
new file mode 100644
index 0000000..41adf4e
--- /dev/null
+++ b/.github/workflows/docs.yaml
@@ -0,0 +1,40 @@
+name: Docs
+
+on:
+ push:
+ branches:
+ - master
+
+permissions: {}
+jobs:
+ build:
+ permissions:
+ contents: write # to push pages branch (peaceiris/actions-gh-pages)
+
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.10"
+ - name: Checkout Pygments
+ uses: actions/checkout@v2
+ - name: Install Sphinx & WCAG contrast ratio
+ run: pip install Sphinx wcag-contrast-ratio
+ - name: Create Pyodide WASM package
+ run: cd doc && make pyodide
+ - name: Sphinx build
+ run: |
+ cd doc
+ WEBSITE_BUILD=1 make dirhtml
+ touch _build/dirhtml/.nojekyll
+ echo -e 'pygments.org\nwww.pygments.org' > _build/dirhtml/CNAME
+ echo 'Automated deployment of docs for GitHub pages.' > _build/dirhtml/README
+ - name: Deploy to repo
+ if: github.repository_owner == 'pygments'
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ deploy_key: ${{ secrets.ACTIONS_DEPLOY_KEY }}
+ external_repository: pygments/pygments.github.io
+ publish_branch: master
+ publish_dir: ./doc/_build/dirhtml
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cbfddbe
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,21 @@
+*.egg
+*.pyc
+*.pyo
+.*.sw[op]
+/.pytest_cache/
+/.idea/
+/.project
+/.tags
+/.tox/
+/.cache/
+/Pygments.egg-info/*
+/TAGS
+/build/*
+/dist/*
+/doc/_build
+/.coverage
+/htmlcov
+/.vscode
+venv/
+.venv/
+.DS_Store
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..18a32c6
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,264 @@
+Pygments is written and maintained by Georg Brandl <georg@python.org>.
+
+Major developers are Tim Hatch <tim@timhatch.com> and Armin Ronacher
+<armin.ronacher@active-4.com>.
+
+Other contributors, listed alphabetically, are:
+
+* Sam Aaron -- Ioke lexer
+* Jean Abou Samra -- LilyPond lexer
+* João Abecasis -- JSLT lexer
+* Ali Afshar -- image formatter
+* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript
+ lexers
+* Muthiah Annamalai -- Ezhil lexer
+* Kumar Appaiah -- Debian control lexer
+* Andreas Amann -- AppleScript lexer
+* Timothy Armstrong -- Dart lexer fixes
+* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers
+* Eiríkr Åsheim -- Uxntal lexer
+* Jeremy Ashkenas -- CoffeeScript lexer
+* José Joaquín Atria -- Praat lexer
+* Stefan Matthias Aust -- Smalltalk lexer
+* Lucas Bajolet -- Nit lexer
+* Ben Bangert -- Mako lexers
+* Max Battcher -- Darcs patch lexer
+* Thomas Baruchel -- APL lexer
+* Tim Baumann -- (Literate) Agda lexer
+* Paul Baumgart, 280 North, Inc. -- Objective-J lexer
+* Michael Bayer -- Myghty lexers
+* Thomas Beale -- Archetype lexers
+* John Benediktsson -- Factor lexer
+* Trevor Bergeron -- mIRC formatter
+* Vincent Bernat -- LessCSS lexer
+* Christopher Bertels -- Fancy lexer
+* Sébastien Bigaret -- QVT Operational lexer
+* Jarrett Billingsley -- MiniD lexer
+* Adam Blinkinsop -- Haskell, Redcode lexers
+* Stéphane Blondon -- Procfile, SGF and Sieve lexers
+* Frits van Bommel -- assembler lexers
+* Pierre Bourdon -- bugfixes
+* Martijn Braam -- Kernel log lexer, BARE lexer
+* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter
+* chebee7i -- Python traceback lexer improvements
+* Hiram Chirino -- Scaml and Jade lexers
+* Mauricio Caceres -- SAS and Stata lexers.
+* Ian Cooper -- VGL lexer
+* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers
+* Leaf Corcoran -- MoonScript lexer
+* Christopher Creutzig -- MuPAD lexer
+* Daniël W. Crompton -- Pike lexer
+* Pete Curry -- bugfixes
+* Bryan Davis -- EBNF lexer
+* Bruno Deferrari -- Shen lexer
+* Walter Dörwald -- UL4 lexer
+* Luke Drummond -- Meson lexer
+* Giedrius Dubinskas -- HTML formatter improvements
+* Owen Durni -- Haxe lexer
+* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer
+* James Edwards -- Terraform lexer
+* Nick Efford -- Python 3 lexer
+* Sven Efftinge -- Xtend lexer
+* Artem Egorkine -- terminal256 formatter
+* Matthew Fernandez -- CAmkES lexer
+* Paweł Fertyk -- GDScript lexer, HTML formatter improvements
+* Michael Ficarra -- CPSA lexer
+* James H. Fisher -- PostScript lexer
+* William S. Fulton -- SWIG lexer
+* Carlos Galdino -- Elixir and Elixir Console lexers
+* Michael Galloy -- IDL lexer
+* Naveen Garg -- Autohotkey lexer
+* Simon Garnotel -- FreeFem++ lexer
+* Laurent Gautier -- R/S lexer
+* Alex Gaynor -- PyPy log lexer
+* Richard Gerkin -- Igor Pro lexer
+* Alain Gilbert -- TypeScript lexer
+* Alex Gilding -- BlitzBasic lexer
+* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers
+* Bertrand Goetzmann -- Groovy lexer
+* Krzysiek Goj -- Scala lexer
+* Rostyslav Golda -- FloScript lexer
+* Andrey Golovizin -- BibTeX lexers
+* Matt Good -- Genshi, Cheetah lexers
+* Michał Górny -- vim modeline support
+* Alex Gosse -- TrafficScript lexer
+* Patrick Gotthardt -- PHP namespaces support
+* Hubert Gruniaux -- C and C++ lexer improvements
+* Olivier Guibe -- Asymptote lexer
+* Phil Hagelberg -- Fennel lexer
+* Florian Hahn -- Boogie lexer
+* Martin Harriman -- SNOBOL lexer
+* Matthew Harrison -- SVG formatter
+* Steven Hazel -- Tcl lexer
+* Dan Michael Heggø -- Turtle lexer
+* Aslak Hellesøy -- Gherkin lexer
+* Greg Hendershott -- Racket lexer
+* Justin Hendrick -- ParaSail lexer
+* Jordi Gutiérrez Hermoso -- Octave lexer
+* David Hess, Fish Software, Inc. -- Objective-J lexer
+* Ken Hilton -- Typographic Number Theory and Arrow lexers
+* Varun Hiremath -- Debian control lexer
+* Rob Hoelz -- Perl 6 lexer
+* Doug Hogan -- Mscgen lexer
+* Ben Hollis -- Mason lexer
+* Max Horn -- GAP lexer
+* Fred Hornsey -- OMG IDL Lexer
+* Alastair Houghton -- Lexer inheritance facility
+* Tim Howard -- BlitzMax lexer
+* Dustin Howett -- Logos lexer
+* Ivan Inozemtsev -- Fantom lexer
+* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session,
+ MSDOS session, BC, WDiff
+* Brian R. Jackson -- Tea lexer
+* Christian Jann -- ShellSession lexer
+* Dennis Kaarsemaker -- sources.list lexer
+* Dmitri Kabak -- Inferno Limbo lexer
+* Igor Kalnitsky -- vhdl lexer
+* Colin Kennedy - USD lexer
+* Alexander Kit -- MaskJS lexer
+* Pekka Klärck -- Robot Framework lexer
+* Gerwin Klein -- Isabelle lexer
+* Eric Knibbe -- Lasso lexer
+* Stepan Koltsov -- Clay lexer
+* Oliver Kopp - Friendly grayscale style
+* Adam Koprowski -- Opa lexer
+* Benjamin Kowarsch -- Modula-2 lexer
+* Domen Kožar -- Nix lexer
+* Oleh Krekel -- Emacs Lisp lexer
+* Alexander Kriegisch -- Kconfig and AspectJ lexers
+* Marek Kubica -- Scheme lexer
+* Jochen Kupperschmidt -- Markdown processor
+* Gerd Kurzbach -- Modelica lexer
+* Jon Larimer, Google Inc. -- Smali lexer
+* Olov Lassus -- Dart lexer
+* Matt Layman -- TAP lexer
+* Kristian Lyngstøl -- Varnish lexers
+* Sylvestre Ledru -- Scilab lexer
+* Chee Sing Lee -- Flatline lexer
+* Mark Lee -- Vala lexer
+* Pete Lomax -- Phix lexer
+* Valentin Lorentz -- C++ lexer improvements
+* Ben Mabey -- Gherkin lexer
+* Angus MacArthur -- QML lexer
+* Louis Mandel -- X10 lexer
+* Louis Marchand -- Eiffel lexer
+* Simone Margaritelli -- Hybris lexer
+* Tim Martin - World of Warcraft TOC lexer
+* Kirk McDonald -- D lexer
+* Gordon McGregor -- SystemVerilog lexer
+* Stephen McKamey -- Duel/JBST lexer
+* Brian McKenna -- F# lexer
+* Charles McLaughlin -- Puppet lexer
+* Kurt McKee -- Tera Term macro lexer, PostgreSQL updates, MySQL overhaul, JSON lexer
+* Joe Eli McIlvain -- Savi lexer
+* Lukas Meuser -- BBCode formatter, Lua lexer
+* Cat Miller -- Pig lexer
+* Paul Miller -- LiveScript lexer
+* Hong Minhee -- HTTP lexer
+* Michael Mior -- Awk lexer
+* Bruce Mitchener -- Dylan lexer rewrite
+* Reuben Morais -- SourcePawn lexer
+* Jon Morton -- Rust lexer
+* Paulo Moura -- Logtalk lexer
+* Mher Movsisyan -- DTD lexer
+* Dejan Muhamedagic -- Crmsh lexer
+* Ana Nelson -- Ragel, ANTLR, R console lexers
+* Kurt Neufeld -- Markdown lexer
+* Nam T. Nguyen -- Monokai style
+* Jesper Noehr -- HTML formatter "anchorlinenos"
+* Mike Nolta -- Julia lexer
+* Avery Nortonsmith -- Pointless lexer
+* Jonas Obrist -- BBCode lexer
+* Edward O'Callaghan -- Cryptol lexer
+* David Oliva -- Rebol lexer
+* Pat Pannuto -- nesC lexer
+* Jon Parise -- Protocol buffers and Thrift lexers
+* Benjamin Peterson -- Test suite refactoring
+* Ronny Pfannschmidt -- BBCode lexer
+* Dominik Picheta -- Nimrod lexer
+* Andrew Pinkham -- RTF Formatter Refactoring
+* Clément Prévost -- UrbiScript lexer
+* Tanner Prynn -- cmdline -x option and loading lexers from files
+* Oleh Prypin -- Crystal lexer (based on Ruby lexer)
+* Nick Psaris -- K and Q lexers
+* Xidorn Quan -- Web IDL lexer
+* Elias Rabel -- Fortran fixed form lexer
+* raichoo -- Idris lexer
+* Daniel Ramirez -- GDScript lexer
+* Kashif Rasul -- CUDA lexer
+* Nathan Reed -- HLSL lexer
+* Justin Reidy -- MXML lexer
+* Norman Richards -- JSON lexer
+* Corey Richardson -- Rust lexer updates
+* Fabrizio Riguzzi -- cplint lexer
+* Lubomir Rintel -- GoodData MAQL and CL lexers
+* Andre Roberge -- Tango style
+* Georg Rollinger -- HSAIL lexer
+* Michiel Roos -- TypoScript lexer
+* Konrad Rudolph -- LaTeX formatter enhancements
+* Mario Ruggier -- Evoque lexers
+* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements
+* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers
+* Matteo Sasso -- Common Lisp lexer
+* Joe Schafer -- Ada lexer
+* Max Schillinger -- TiddlyWiki5 lexer
+* Ken Schutte -- Matlab lexers
+* René Schwaiger -- Rainbow Dash style
+* Sebastian Schweizer -- Whiley lexer
+* Tassilo Schweyer -- Io, MOOCode lexers
+* Pablo Seminario -- PromQL lexer
+* Ted Shaw -- AutoIt lexer
+* Joerg Sieker -- ABAP lexer
+* Robert Simmons -- Standard ML lexer
+* Kirill Simonov -- YAML lexer
+* Corbin Simpson -- Monte lexer
+* Ville Skyttä -- ASCII armored lexer
+* Alexander Smishlajev -- Visual FoxPro lexer
+* Steve Spigarelli -- XQuery lexer
+* Jerome St-Louis -- eC lexer
+* Camil Staps -- Clean and NuSMV lexers; Solarized style
+* James Strachan -- Kotlin lexer
+* Tom Stuart -- Treetop lexer
+* Colin Sullivan -- SuperCollider lexer
+* Ben Swift -- Extempore lexer
+* tatt61880 -- Kuin lexer
+* Edoardo Tenani -- Arduino lexer
+* Tiberius Teng -- default style overhaul
+* Jeremy Thurgood -- Erlang, Squid config lexers
+* Brian Tiffin -- OpenCOBOL lexer
+* Bob Tolbert -- Hy lexer
+* Doug Torrance -- Macaulay2 lexer
+* Matthias Trute -- Forth lexer
+* Tuoa Spi T4 -- Bdd lexer
+* Erick Tryzelaar -- Felix lexer
+* Alexander Udalov -- Kotlin lexer improvements
+* Thomas Van Doren -- Chapel lexer
+* Daniele Varrazzo -- PostgreSQL lexers
+* Abe Voelker -- OpenEdge ABL lexer
+* Pepijn de Vos -- HTML formatter CTags support
+* Matthias Vallentin -- Bro lexer
+* Benoît Vinot -- AMPL lexer
+* Linh Vu Hong -- RSL lexer
+* Immanuel Washington -- Smithy lexer
+* Nathan Weizenbaum -- Haml and Sass lexers
+* Nathan Whetsell -- Csound lexers
+* Dietmar Winkler -- Modelica lexer
+* Nils Winter -- Smalltalk lexer
+* Davy Wybiral -- Clojure lexer
+* Whitney Young -- ObjectiveC lexer
+* Diego Zamboni -- CFengine3 lexer
+* Enrique Zamudio -- Ceylon lexer
+* Alex Zimin -- Nemerle lexer
+* Rob Zimmerman -- Kal lexer
+* Vincent Zurczak -- Roboconf lexer
+* Hubert Gruniaux -- C and C++ lexer improvements
+* Thomas Symalla -- AMDGPU Lexer
+* 15b3 -- Image Formatter improvements
+* Fabian Neumann -- CDDL lexer
+* Thomas Duboucher -- CDDL lexer
+* Philipp Imhof -- Pango Markup formatter
+* Thomas Voss -- Sed lexer
+* Martin Fischer -- WCAG contrast testing
+* Marc Auberer -- Spice lexer
+
+Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
new file mode 100644
index 0000000..2aa54fa
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,2134 @@
+Pygments changelog
+==================
+
+Pull request numbers before 2.4.2 are not linked as they refer to the now defunct Bitbucket project.
+
+Version 2.14.0
+--------------
+(released January 1st, 2023)
+
+- Added lexers:
+
+ * Arturo (#2259)
+ * GAP session (#2211)
+ * Fift (#2249)
+ * func (#2232)
+ * Jsonnet (#2239)
+ * Minecraft schema (#2276)
+ * MIPS (#2228)
+ * Phix (#2222)
+ * Portugol (#2300)
+ * TL-b (#2247)
+ * World of Warcraft TOC format (#2244, #2245)
+ * Wren (#2271)
+
+- Updated lexers:
+
+ * Abap: Update keywords (#2281)
+ * Alloy: Update for Alloy 6 (#1963)
+ * C family (C, C++ and many others):
+
+ - Fix an issue where a chunk would be wrongly recognized as a function
+ definition due to braces in comments (#2210)
+ - Improve parentheses handling for function definitions (#2207, #2208)
+
+ * C#: Fix number and operator recognition (#2256, #2257)
+ * CSound: Updated builtins (#2268)
+ * F#: Add ``.fsx`` file extension (#2282)
+ * gas (GNU assembler): recognize braces as punctuation (#2230)
+ * HTTP: Add `CONNECT` keyword (#2242)
+ * Inform 6: Fix lexing of properties and doubles (#2214)
+ * INI: Allow comments that are not their own line (#2217, #2161)
+ * Java properties: Fix issue with whitespace-delimited keys, support
+ comments starting with `!` and escapes, no longer support undocumented
+ `;` and `//` comments (#2241)
+ * LilyPond: Improve heuristics, add ``\maxima`` duration (#2283)
+ * LLVM: Add opaque pointer type (#2269)
+ * Macaulay2: Update keywords (#2305)
+ * Minecraft-related lexers (SNB and Minecraft function) moved to
+ ``pygments.lexers.minecraft`` (#2276)
+ * Nim: General improvements (#1970)
+ * Nix: Fix single quotes inside indented strings (#2289)
+ * Objective J: Fix catastrophic backtracking (#2225)
+ * NASM: Add support for SSE/AVX/AVX-512 registers as well as 'rel'
+ and 'abs' address operators (#2212)
+ * Powershell:
+
+ - Add ``local:`` keyword (#2254)
+ - Allow continuations without markers (#2262, #2263)
+
+ * Solidity: Add boolean operators (#2292)
+ * Spice: Add ``enum`` keyword and fix a bug regarding binary,
+ hexadecimal and octal number tokens (#2227)
+ * YAML: Accept colons in key names (#2277)
+
+- Fix `make mapfiles` when Pygments is not installed in editable mode
+ (#2223)
+
+- Support more filetypes and compression types in `autopygmentize` (#2219)
+- Merge consecutive tokens in Autohotkey, Clay (#2248)
+- Add ``.nasm`` as a recognized file type for NASM (#2280)
+- Add ``*Spec.hs`` as a recognized file type for ``HSpec`` (#2308)
+- Add ``*.pyi`` (for typing stub files) as a recognized file type for
+ Python (#2331)
+- The HTML lexer no longer emits empty spans for whitespace (#2304)
+- Fix ``IRCFormatter`` inserting linenumbers incorrectly (#2270)
+
+Version 2.13.0
+--------------
+(released August 15th, 2022)
+
+- Added lexers:
+
+ * COMAL-80 (#2180)
+ * JMESPath (#2174, #2175, #2179, #2182)
+ * Sql+Jinja (#2148)
+
+- Updated lexers:
+
+ * Ada: support Ada 2022 (#2121); disable recognition of namespaces
+ because it disturbs lexing of aspects (#2125)
+ * Agda: allow straight quotes in module names (#2163)
+ * C family (C, C++ and many others): allow comments between
+ elements of function headers, e.g. between the arguments and
+ the opening brace for the body (#1891)
+ * C++: Resolve several cases of ``Error`` tokens (#2207, #2208)
+ * Coq: Add some common keywords, improve recognition of ``Set``
+ and qualified identifiers (#2158)
+ * F*: Allow C-style comments anywhere in a line
+ * Fortran: Fix catastrophic backtracking with backslashes in strings
+ (#2194)
+ * Go: add support for generics (#2167)
+ * Inform: Update for version 6.40 (#2190)
+ * Isabelle: recognize cartouches (#2089)
+ * Java: support multiline strings aka. text blocks (#2132)
+ * Kotlin: Add ``value`` modifier (#2142)
+ * LilyPond: Add some missing builtins
+ * Macaulay2: Update builtins (#2139)
+ * Matlab session: fix traceback when a line continuation ellipsis
+ appears in the output (#2166)
+ * .NET: Add aliases for LibreOffice Basic, OpenOfficeBasic and
+ StarOffice Basic (#2170)
+ * Nim: Use ``Name.Builtin`` instead of ``Keyword.Type`` (#2136)
+ * PHP: fix `\"$var\"` inside strings (#2105)
+ * Python: only recognize ``\N``, ``\u`` and ``\U`` escape sequences
+ in string literals, but not in bytes literals where they are
+ not supported (#2204)
+ * Tcl: support ``${name}`` variables (#2145)
+ * Terraform: Accept leading whitespace for `<<` heredoc
+ delimiters (#2162)
+ * Teraterm: Various improvements (#2165)
+ * Spice: add support for the recently added features including more
+ builtin functions and bin, oct, hex number formats (#2206)
+
+- Added styles:
+
+ * GitHub dark (#2192)
+ * StarOffice (#2168)
+ * Nord (`nord` and `nord-darker`; #2189, #1799, #1678)
+
+- Pygments now tries to use the ``importlib.metadata`` module to
+ discover plugins instead of the slower ``pkg_resources`` (#2155). In
+ particular, this largely speeds up the ``pygmentize`` script when
+ the lexer is not specified.
+
+ ``importlib.metadata`` is only available in the Python standard
+ library since Python 3.8. For older versions, there exists an
+ ``importlib_metadata`` backport on PyPI. For this reason, Pygments
+ now defines a packaging extra ``plugins``, which adds a requirement
+ on ``importlib_metadata`` if the Python version is older than
+ 3.8. Thus, in order to install Pygments with optimal plugin
+ support even for old Python versions, you should do::
+
+ pip install pygments[plugins]
+
+ Pygments still falls back on ``pkg_resources`` if neither
+ ``importlib.metadata`` nor ``importlib_metadata`` is found, but it
+ will be slower.
+
+- Silently ignore ``BrokenPipeError`` in the command-line interface
+ (#2193).
+- The ``HtmlFormatter`` now uses the ``linespans`` attribute for
+ ``anchorlinenos`` if the ``lineanchors`` attribute is unset (#2026).
+- The ``highlight``, ``lex`` and ``format`` functions no longer
+ wrongly report "argument must be a lexer/formatter instance, not a
+ class" in some cases where this is not the actual problem (#2123).
+- Fix warnings in doc build (#2124).
+- The ``codetagify`` filter now recognizes ``FIXME`` tags by default (#2150).
+- The ``pygmentize`` command now recognizes if the ``COLORTERM``
+ environment variable is set to a value indicating that true-color
+ support is available. In that case, it uses the ``TerminalTrueColorFormatter``
+ by default (#2160)
+- Remove redundant caches for filename patterns (#2153)
+- Use new non-deprecated Pillow API for text bounding box in ``ImageFormatter``
+ (#2198)
+- Remove ``default_style`` (#930, #2183)
+- Stop treating ``DeprecationWarnings`` as errors in the unit tests (#2196)
+
+Version 2.12.0
+--------------
+(released April 24th, 2022)
+
+- Added lexers:
+
+ * Berry (#2070)
+ * Cplint (#2045)
+ * Macaulay2 (#1791)
+ * MCFunction (#2107)
+ * Minecraft (#2107)
+ * Qlik (#1925)
+ * ``UnixConfigLexer`` for "colon-separated" config files, like ``/etc/passwd`` (#2112)
+ * Uxntal (#2086)
+ * K and Q (#2073)
+
+- Updated lexers:
+
+ * Agda: Update keyword list (#2017)
+ * C family: Fix identifiers after ``case`` statements (#2084)
+ * Clojure: Highlight ratios (#2042)
+ * Csound: Update to 6.17 (#2064)
+ * CSS: Update the list of properties (#2113)
+ * Elpi:
+
+ - Fix catastrophic backtracking (#2053, #2061)
+ - Fix handling of ``->`` (#2028)
+
+ * Futhark: Add missing tokens (#2118)
+ * Gherkin: Add ``But`` (#2046)
+ * Inform6: Update to 6.36 (#2050)
+ * Jinja2: add ``.xxx.j2`` and ``.xxx.jinja2`` to relevant lexers
+ (for ``xxx`` = ``html``, ``xml``, etc.) (#2103)
+ * JSON: Support C comments in JSON (#2049). Note: This doesn't mean the JSON parser now supports JSONC or JSON5 proper, just that it doesn't error out when seeing a ``/* */`` or ``//`` style comment. If you need proper comment handling, consider using the ``JavaScript`` lexer.
+ * LilyPond:
+
+ - Fix incorrect lexing of names containing a built-in (#2071)
+ - Fix properties containing dashes (#2099)
+
+ * PHP: Update builtin function and keyword list (#2054, #2056)
+ * Python: highlight ``EncodingWarning`` (#2106)
+ * Savi: fix highlighting for underscore/private identifiers,
+ add string interpolation (#2102); fix nested type name highlighting
+ (#2110)
+ * Scheme: Various improvements (#2060)
+ * Spice: Update the keyword list, add new types (#2063, #2067)
+ * Terraform:
+
+ - Support non-idiomatic comments (#2065, #2066)
+ - Fix class name lexing (#2097)
+
+- Add ``plugins`` argument to ``get_all_lexers()``.
+- Bump minimal Python version to 3.6 (#2059)
+- Fix multiple lexers marking whitespace as ``Text`` (#2025)
+- Remove various redundant uses of ``re.UNICODE`` (#2058)
+- Associate ``.resource`` with the Robot framework (#2047)
+- Associate ``.cljc`` with Clojure (#2043)
+- Associate ``.tpp`` with C++ (#2031)
+- Remove traces of Python 2 from the documentation (#2039)
+- The ``native`` style was updated to meet the WCAG AAA contrast guidelines (#2038)
+- Fix various typos (#2030)
+- Fix ``Groff`` formatter not inheriting token styles correctly (#2024)
+- Various improvements to the CI (#2036)
+- The Ada lexer has been moved to a separate file (#2117)
+- When ``linenos=table`` is used, the ``<table>`` itself is now wrapped with a ``<div class="highlight">`` tag instead of placing it inside the ``<td class="code">`` cell (#632.) With this change, the output matches the documented behavior.
+
+.. note::
+
+ If you have subclassed ``HtmlFormatter.wrap``, you may have to adjust the logic.
+
+
+Version 2.11.2
+--------------
+(released January 6th, 2022)
+
+- Updated lexers:
+
+ * C-family: Fix incorrect handling of labels (#2022, #1996, #1182)
+ * Java: Fixed an issue with ``record`` keywords result in ``Error`` tokens in some cases (#2016, #2018)
+
+- Fix links to line numbers not working correctly (#2014)
+- Remove ``underline`` from ``Whitespace`` style in the ``Tango`` theme (#2020)
+- Fix ``IRC`` and ``Terminal256`` formatters not backtracking correctly for custom token types, resulting in some unstyled tokens (#1986)
+
+
+Version 2.11.1
+--------------
+(released December 31st, 2021)
+
+- Updated lexers:
+
+ * C-family: Handle return types with multiple tokens (e.g. ``unsigned int``) (#2008)
+ * JSON: Fix a regression which caused whitespace before ``:`` to result in ``Error`` tokens (#2010)
+ * SPICE: Various improvements (#2009)
+
+
+Version 2.11.0
+--------------
+(released December 30th, 2021)
+
+- Added lexers:
+
+ * BDD (#1803)
+ * Elpi (#1894)
+ * LilyPond (#1845, #1968, #1971, #2001). This comes with a custom style as well.
+ * Maxima (#1885)
+ * Rita (#1541, #2003)
+ * Savi (#1863)
+ * Sed (#1935)
+ * Sophia contracts (#1974)
+ * Spice (#1980)
+ * ``.SRCINFO`` (#1951)
+
+- Updated lexers:
+
+ * ABNF: Allow one-character rules (#1804)
+ * Assembly: Fix incorrect token endings (#1895, #1961)
+ * Bibtex: Distinguish between ``comment`` and ``commentary`` (#1899, #1806)
+ * C family: Support unicode identifiers (#1848)
+ * CDDL: Fix slow lexing speed (#1959)
+ * Debian control: Add missing fields (#1946)
+ * Devicetree: Recognize hexadecimal addresses for nodes (#1949)
+ * GDScript: Add ``void`` data type (#1948)
+ * GSQL
+
+ - Fix comment handling (#2002)
+ - Fix catastrophic backtracking (#2006)
+
+ * HTML, XML: Improve comment handling (#1896)
+ * Java: Add ``yield`` (#1941) and sealed classes/record (#1902)
+ * Makefiles (#1860, #1898)
+ * objdump-nasm: Improve handling of ``--no-show-raw-insn`` dumps (#1981)
+ * Prolog: Support escaped ``\`` inside quoted strings (#1479)
+ * Python:
+
+ - Support ``~`` in tracebacks (#2004)
+ - Support the pattern matching keywords (#1797, #1994)
+
+ * RobotFramework: Improve empty brace handling (#1921, #1922)
+ * Terraform
+
+ - Add the 'set' type (#1909)
+ - Support heredocs (#1909)
+
+- Added styles:
+
+ * Dracula (#1796)
+ * Friendly Grayscale (#1040, #1273)
+ * LilyPond (#1845) -- to be used for the ``LilyPond`` language.
+ * One-Dark (#1924, #1979)
+
+.. note::
+
+ All of the new styles unfortunately do not conform to WCAG recommendations.
+
+- There is new infrastructure in place to improve style accessibility. The default style has been updated to conform to WCAG recommendations. All styles are now checked for sufficient contrast by default to prevent regressions. (#1919, #1937, #1938, #1940)
+- Clean up unused imports (#1887)
+- Fix multiple lexers producing repeated single-character tokens
+- Fix multiple lexers marking whitespace as ``Text`` (#1237, #1905, #1908, #1914, #1911, #1923, #1939, #1957, #1978)
+- Remove duplicated assignments in the Paraiso style (#1934)
+- ``pygmentize`` supports JSON output for the various list functions now, making it easier to consume them from scripts. (#1437, #1890)
+- Use the ``shell`` lexer for ``kshrc`` files (#1947)
+- Use the ``ruby`` lexer for ``Vagrantfile`` files (#1936)
+- Use the C lexer for ``.xbm`` and ``.xpm`` files (#1802)
+- Add a ``groff`` formatter (#1873)
+- Update documentation (#1928)
+- Line anchors now link to themselves (#1973)
+- Add official support for Python 3.10 (#1917)
+- Fix several missing colors in dark styles: Gruvbox dark, Monokai, Rrt, Sas, Strata dark (#1955)
+- Associate more file types with ``man`` pages
+- The ``HtmlFormatter`` can now emit tooltips for each token to ease debugging of lexers (#1822)
+- Add ``f90`` as an alias for ``fortran`` (#2000)
+
+
+Version 2.10.0
+--------------
+(released August 15th, 2021)
+
+- Added lexers:
+
+ * ASC armored files (#1807)
+ * GSQL (#1809, #1866)
+ * Javascript REPL (#1825)
+ * procfile (#1808)
+ * Smithy (#1878, #1879)
+
+- Updated lexers:
+
+ * C-family: Fix preprocessor token issues (#1830)
+ * C# (#1573, #1869)
+ * CSound (#1837)
+ * Fennel (#1862)
+ * JavaScript (#1741, #1814)
+ * LLVM (#1824)
+ * Python (#1852)
+ * Rust
+
+ - Fix lexing of "break" and "continue" (#1843)
+ - Improve attribute handling (#1813)
+
+ * Scala: Add support for the ``\`` operator (#1857)
+ * Swift (#1767, #1842)
+ * Tcl: Allow ``,`` and ``@`` in strings (#1834, #1742)
+ * TOML (#1870, #1872)
+
+- Fix assert statements in TNT lexer.
+- Token types across all lexers have been unified (using the most common token
+ type name) (#1816, #1819)
+- Improve Jasmin min score analysis (#1619)
+- Add new alias for Go files (#1827)
+- Fix multi-line console highlighting (#1833)
+- Add a new trivial lexer which outputs everything as `Text.Generic.Output` (#1835, #1836)
+- Use the ``.ini`` lexer for ``systemd`` files (#1849)
+- Fix a ``FutureWarning`` related to ``words()`` (#1854)
+- ``pwsh`` is now recognized as an alias for PowerShell (#1876)
+
+
+Version 2.9.0
+-------------
+(released May 3rd, 2021)
+
+- Added lexers:
+
+ * APDL, gcode (#1714)
+ * Kuin (#1300)
+ * NestedText (#1578)
+ * OMG IDL (#1595)
+ * TEAL (#1671)
+ * ThingsDB (#1295)
+ * WebAssembly (#1416, #1564)
+
+- Updated lexers:
+
+ * AMDGPU (#1717, #1775)
+ * APL (#1747)
+ * C/C++: Improve namespace handling (#1722, #1561, #1719, #1746)
+ * Chapel (#1743)
+ * Coq (#1721)
+ * Cython (#853)
+ * DeviceTree (#1755)
+ * Groovy (#1765)
+ * Julia (#1715)
+ * Octave: Allow multiline and block-percent comments (#1726)
+ * PowerShell: Improve lexing of ``:`` (#1682, #1758)
+ * PromQL (#1783)
+ * Python: Improve float parsing (#1768, #1740)
+ * Rust (#1061)
+ * Scala: Rewrite to support Scala3 (#1694, #1035, #1121)
+ * Terraform: Support 0.14 syntax (#1756)
+ * Velocity: Detect multi-line patterns (#1776)
+
+- Add Pango formatter (#1727)
+- Autopygmentize uses ``file`` first instead of ``pygments -N`` (#1786)
+- Fix links (#1716)
+- Fix issue with LaTeX formatter and ``minted`` (#1734, #1735, #1736, #1737)
+- Improve alias order (#1780)
+- Improve line number colors (#1779, #1778)
+- Fix CTag related issue (#1724)
+- Recognize ``.leex`` as Elixir templates
+- Fix incorrect variable being accessed (#1748)
+
+- Updated `filename` handling in HTML formatter if `linenos='table'` (#1757)
+
+ * Previously the filename would be emitted within the `<td>` holding the
+ code, but outside the `<pre>`. This would invariably break the alignment
+ with line numbers.
+ * Now if `filename` is specified, a separate `<tr>` is emitted before the
+ table content which contains a single `<th>` with `colspan=2` so it
+ spans both the line number and code columns. The filename is still
+ within `<span class="filename">...</span>` so any existing styles
+ should still apply, although the CSS path may need to change.
+ * For an example of the new output format see
+ `table_cls_step_1_start_1_special_0_noanchor_filename.html`
+ in the `tests/html_linenos_expected_output/` directory.
+ * For more details and discussion see the issue
+ https://github.com/pygments/pygments/issues/1757
+
+- Added styles:
+
+ * Gruvbox light+dark (#1763)
+
+
+Version 2.8.0
+-------------
+(released February 14, 2021)
+
+- Added lexers:
+
+ * AMDGPU (#1626)
+ * CDDL (#1379, #1239)
+ * Futhark (#1691)
+ * Graphviz/DOT (#1657, #731)
+
+- Updated lexers:
+
+ * AutoIt: Support single quoted strings (#1667, #1663)
+ * C/C++ & related: Fix mishandling ``*/`` (#1695)
+ * Cocoa: Add builtin types (#1703)
+ * Console (#1672)
+ * Eiffel: Fix performance issues (#1658)
+ * Fortran: Improve combined keyword detection (#1677, #1188)
+ * J: Fix operator ``?`` lexing (#1700, #1149)
+ * JavaScript/TypeScript: Fix escapes in backtick strings (#1679, #1686)
+ * Kotlin: Improve string interpolation, modifier keyword handling, and various small issues (#1699)
+ * LESS: Support single-line comments (#1046)
+ * Matlab:
+
+ - Add support for class properties (#1466)
+ - Update builtin functions (#1705)
+ - Various cleanups (#1673)
+
+ * OpenEdge (#1696)
+ * Python: Improve handling of raw f-strings (#1681, #1683)
+ * Ruby: Better method name handling (#1531)
+ * Stata: Updated keywords (#1470)
+
+- Added styles:
+
+ * Material (#1662)
+ * Zenburn (#1659)
+
+- The `pygmentize` script now uses `argparse`, all options should work
+ as before
+
+- Add `pygmentize -C` option to guess a lexer from content
+
+- With this release, Pygments moves to a new internal testing system (#1649.)
+ See ``Contributing.md`` for details. The main advantage of this new change
+ is a much better test coverage of all existing example lexers. It also makes
+ it much easier to add new test snippets.
+- Make guessing prefer Python 3 lexer
+- Do not guess MIME or SQL without reason
+- Changed setuptools to use a declarative config through ``setup.cfg``.
+ Building Pygments now requires setuptools 39.2+.
+- Add markdown to MarkdownLexer aliases (#1687)
+- Change line number handling
+
+ * In ``<table>`` based output, the ``td.linenos`` element will have either a
+ ``normal`` or ``special`` class attached. Previously, only ``special`` line
+ numbers got a class. This prevents styles from getting applied twice -
+ once via ``<pre>``, once via ``<span class="special">``. This also means
+ that ``td.linenos pre`` is no longer styled, instead, use
+ ``td.linenos .normal`` and ``td.linenos .special``.
+ * In the "inline" style, the DOM element order was changed. The line number
+ is added first, then the line is wrapped by the highlighter.
+ This fixes lines not being fully highlighted.
+ * The visual output for inline and non-inline line numbers & highlighting,
+ as well as class-based and inline styling is now consistent.
+ * Line number styles are set to ``background-color: transparent`` and
+ ``color: inherit`` by default. This works much better with dark styles
+ which don't have colors set for line numbers.
+
+- Remove "raw" alias from RawTokenLexer, so that it cannot be
+ selected by alias.
+- Fix RawTokenLexer to work in Python 3 and handle exceptions.
+- Add prompt colors to the Solarized theme (#1529)
+- Image formatter supports background colors now (#1374)
+- Add support for anchors in conjunction with inline line numbers (#1591)
+- Modernize the codebase using ``pyupgrade`` (#1622)
+- Add support for line numbers to the ``terminal256`` formatter (#1674, #1653)
+- Improve ``analyze_text`` logic for ``ECL`` (#1610)
+- Improve ``analyze_text`` logic for ``CBM Basic V2`` (#1607)
+- Improve LaTeX formatter (#1708, #1709)
+
+
+Version 2.7.4
+-------------
+(released January 12, 2021)
+
+- Updated lexers:
+
+ - Apache configurations: Improve handling of malformed tags (#1656)
+ - CSS: Add support for variables (#1633, #1666)
+ - Crystal (#1650, #1670)
+ - Coq (#1648)
+ - Fortran: Add missing keywords (#1635, #1665)
+ - Ini (#1624)
+ - JavaScript and variants (#1647 -- missing regex flags, #1651)
+ - Markdown (#1623, #1617)
+ - Shell
+
+ - Lex trailing whitespace as part of the prompt (#1645)
+ - Add missing ``in`` keyword (#1652)
+
+ - SQL - Fix keywords (#1668)
+ - Typescript: Fix incorrect punctuation handling (#1510, #1511)
+
+- Fix infinite loop in SML lexer (#1625), `CVE-2021-20270 <https://nvd.nist.gov/vuln/detail/CVE-2021-20270>`_
+- Fix backtracking string regexes in JavaScript/TypeScript, Modula2
+ and many other lexers (#1637) `CVE-2021-27291 <https://nvd.nist.gov/vuln/detail/CVE-2021-27291>`_
+- Limit recursion with nesting Ruby heredocs (#1638)
+- Fix a few inefficient regexes for guessing lexers
+- Fix the raw token lexer handling of Unicode (#1616)
+- Revert a private API change in the HTML formatter (#1655) --
+ please note that private APIs remain subject to change!
+- Fix several exponential/cubic-complexity regexes found by
+ Ben Caller/Doyensec (#1675)
+- Fix incorrect MATLAB example (#1582)
+
+Thanks to Google's OSS-Fuzz project for finding many of these bugs.
+
+
+Version 2.7.3
+-------------
+(released December 6, 2020)
+
+- Updated lexers:
+
+ * Ada (#1581)
+ * HTML (#1615, #1614)
+ * Java (#1594, #1586)
+ * JavaScript (#1605, #1589, #1588)
+ * JSON (#1569 -- this is a complete rewrite)
+ * Lean (#1601)
+ * LLVM (#1612)
+ * Mason (#1592)
+ * MySQL (#1555, #1551)
+ * Rust (#1608)
+ * Turtle (#1590, #1553)
+
+- Deprecated JsonBareObjectLexer, which is now identical to JsonLexer (#1600)
+- The ``ImageFormatter`` now calculates the exact character width, which fixes some issues with overlapping text (#1213, #1611)
+- Documentation fixes (#1609, #1599, #1598)
+- Fixed duplicated Juttle language alias (#1604, #1606)
+- Added support for Kotlin scripts (#1587)
+- Removed CSS rule which forced margin to 0
+
+
+Version 2.7.2
+-------------
+(released October 24, 2020)
+
+- Updated lexers:
+
+ * Latex (#1517, #1516)
+ * LLVM (#1565)
+ * SPARQL (#1559)
+
+- Fix Python console/traceback lexer problems with custom exceptions without messages (#1548)
+- Allow loading ttc fonts on Mac/image formatter (#1223)
+- Improve ``analyze_text`` across a variety of lexers (#1549)
+- Remove CSS rule which forced the vertical padding to 0 for line numbers (#1583, #1579)
+- Fix ``TNTLexer`` crashing on unexpected EOL (#1568, #1570)
+- ``regexlint`` can be now run locally as part of ``tox`` tests (#1557)
+- Fix typos (#1550, #1562)
+- Add Python 3.9 as a supported version (#1554)
+
+
+Version 2.7.1
+-------------
+(released September 16, 2020)
+
+- Fixed a regression in the JSON lexer (#1544)
+
+
+Version 2.7.0
+-------------
+(released September 12, 2020)
+
+- Added lexers:
+
+ * Arrow (#1481, #1499)
+ * BARE (#1488)
+ * Devicetree (#1434)
+ * F* (#1409)
+ * GDScript (#1457)
+ * Pointless (#1494)
+ * PromQL (#1506)
+ * PsySH (#1438)
+ * Singularity (#1285)
+ * TiddlyWiki5 (#1390)
+ * TNT (#1414)
+ * YANG (#1408, #1428)
+
+- Updated lexers:
+
+ * APL (#1503)
+ * C++ (#1350, which also fixes: #1222, #996, #906, #828, #1162, #1166,
+ #1396)
+ * Chapel (#1423)
+ * CMake (#1491)
+ * CSound (#1509)
+ * Cython (#1507)
+ * Dart (#1449)
+ * Fennel (#1535)
+ * Fortran (#1442)
+ * GAS (#1530)
+ * HTTP (#1432, #1520, #1521)
+ * Inform 6 (#1461)
+ * Javascript (#1533)
+ * JSON (#1065, #1528)
+ * Lean (#1415)
+ * Matlab (#1399)
+ * Markdown (#1492, #1495)
+ * MySQL (#975, #1063, #1453, #1527)
+ * NASM (#1465)
+ * Nim (#1426)
+ * PostgreSQL (#1513)
+ * PowerShell (#1398, #1497)
+ * Protobuf (#1505)
+ * Robot (#1480)
+ * SQL (#1402)
+ * SystemVerilog (#1436, #1452, #1454, #1460, #1462, #1463, #1464, #1471, #1496, #1504)
+ * TeraTerm (#1337)
+ * XML (#1502)
+
+- Added a new filter for math symbols (#1406)
+- The Kconfig lexer will match Kconfig derivative names now (#1458)
+- Improved HTML formatter output (#1500)
+- ``.markdown`` is now recognized as an extension for Markdown files (#1476)
+- Fixed line number colors for Solarized (#1477, #1356)
+- Improvements to exception handling (#1478)
+- Improvements to tests (#1532, #1533, #1539)
+- Various code cleanups (#1536, #1537, #1538)
+
+
+Version 2.6.1
+-------------
+(released March 8, 2020)
+
+- This release fixes a packaging issue. No functional changes.
+
+
+Version 2.6
+-----------
+(released March 8, 2020)
+
+- Running Pygments on Python 2.x is no longer supported.
+ (The Python 2 lexer still exists.)
+
+- Added lexers:
+
+ * Linux kernel logs (#1310)
+ * LLVM MIR (#1361)
+ * MiniScript (#1397)
+ * Mosel (#1287, #1326)
+ * Parsing Expression Grammar (#1336)
+ * ReasonML (#1386)
+ * Ride (#1319, #1321)
+ * Sieve (#1257)
+ * USD (#1290)
+ * WebIDL (#1309)
+
+- Updated lexers:
+
+ * Apache2 (#1378)
+ * Chapel (#1357)
+ * CSound (#1383)
+ * D (#1375, #1362)
+ * Haskell (#1347, #1177)
+ * Idris (#1360)
+ * Perl6/Raku lexer (#1344)
+ * Python3 (#1382, #1385)
+ * Rust: Updated lexer to cover more builtins (mostly macros) and miscellaneous
+ new syntax (#1320)
+ * SQL: Add temporal support keywords (#1402)
+
+- The 256-color/true-color terminal formatters now support the italic attribute
+ in styles (#1288)
+- Support HTTP 2/3 header (#1308)
+- Support missing reason in HTTP header (#1322)
+- Boogie/Silver: support line continuations and triggers, move contract keywords
+ to separate category (#1299)
+- GAS: support C-style comments (#1291)
+- Fix names in S lexer (#1330, #1333)
+- Fix numeric literals in Ada (#1334)
+- Recognize ``.mjs`` files as Javascript (#1392)
+- Recognize ``.eex`` files as Elixir (#1387)
+- Fix ``re.MULTILINE`` usage (#1388)
+- Recognize ``pipenv`` and ``poetry`` dependency & lock files (PR#1376)
+- Improve font search on Windows (#1247)
+- Remove unused script block (#1401)
+
+
+Version 2.5.2
+-------------
+(released November 29, 2019)
+
+- Fix incompatibility with some setuptools versions (PR#1316)
+
+- Fix lexing of ReST field lists (PR#1279)
+- Fix lexing of Matlab keywords as field names (PR#1282)
+- Recognize double-quoted strings in Matlab (PR#1278)
+- Avoid slow backtracking in Vim lexer (PR#1312)
+- Fix Scala highlighting of types (PR#1315)
+- Highlight field lists more consistently in ReST (PR#1279)
+- Fix highlighting Matlab keywords in field names (PR#1282)
+- Recognize Matlab double quoted strings (PR#1278)
+- Add some Terraform keywords
+- Update Modelica lexer to 3.4
+- Update Crystal examples
+
+
+Version 2.5.1
+-------------
+(released November 26, 2019)
+
+- This release fixes a packaging issue. No functional changes.
+
+
+Version 2.5.0
+-------------
+(released November 26, 2019)
+
+- Added lexers:
+
+ * Email (PR#1246)
+ * Erlang, Elixir shells (PR#823, #1521)
+ * Notmuch (PR#1264)
+ * `Scdoc <https://git.sr.ht/~sircmpwn/scdoc>`_ (PR#1268)
+ * `Solidity <https://solidity.readthedocs.io/>`_ (#1214)
+ * `Zeek <https://www.zeek.org>`_ (new name for Bro) (PR#1269)
+ * `Zig <https://ziglang.org/>`_ (PR#820)
+
+- Updated lexers:
+
+ * Apache2 Configuration (PR#1251)
+ * Bash sessions (#1253)
+ * CSound (PR#1250)
+ * Dart
+ * Dockerfile
+ * Emacs Lisp
+ * Handlebars (PR#773)
+ * Java (#1101, #987)
+ * Logtalk (PR#1261)
+ * Matlab (PR#1271)
+ * Praat (PR#1277)
+ * Python3 (PR#1255, PR#1400)
+ * Ruby
+ * YAML (#1528)
+ * Velocity
+
+- Added styles:
+
+ * Inkpot (PR#1276)
+
+- The ``PythonLexer`` class is now an alias for the former ``Python3Lexer``.
+  The old ``PythonLexer`` is available as ``Python2Lexer``. The same change
+  has been made for the ``PythonTracebackLexer``. The ``python3`` option for
+  the ``PythonConsoleLexer`` is now true by default.
+
+- Bump ``NasmLexer`` priority over ``TasmLexer`` for ``.asm`` files
+ (fixes #1326)
+- Default font in the ``ImageFormatter`` has been updated (#928, PR#1245)
+- Test suite switched to py.test, removed nose dependency (#1490)
+- Reduce ``TeraTerm`` lexer score -- it used to match nearly all languages
+ (#1256)
+- Treat ``Skylark``/``Starlark`` files as Python files (PR#1259)
+- Image formatter: actually respect ``line_number_separator`` option
+
+- Add LICENSE file to wheel builds
+- Agda: fix lambda highlighting
+- Dart: support ``@`` annotations
+- Dockerfile: accept ``FROM ... AS`` syntax
+- Emacs Lisp: add more string functions
+- GAS: accept registers in directive arguments
+- Java: make structural punctuation (braces, parens, colon, comma) ``Punctuation``, not ``Operator`` (#987)
+- Java: support ``var`` contextual keyword (#1101)
+- Matlab: Fix recognition of ``function`` keyword (PR#1271)
+- Python: recognize ``.jy`` filenames (#976)
+- Python: recognize ``f`` string prefix (#1156)
+- Ruby: support squiggly heredocs
+- Shell sessions: recognize Virtualenv prompt (PR#1266)
+- Velocity: support silent reference syntax
+
+
+Version 2.4.2
+-------------
+(released May 28, 2019)
+
+- Fix encoding error when guessing lexer with given ``encoding`` option
+ (#1438)
+
+
+Version 2.4.1
+-------------
+(released May 24, 2019)
+
+- Updated lexers:
+
+ * Coq (#1430)
+ * MSDOS Session (PR#734)
+ * NASM (#1517)
+ * Objective-C (PR#813, #1508)
+ * Prolog (#1511)
+ * TypeScript (#1515)
+
+- Support CSS variables in stylesheets (PR#814, #1356)
+- Fix F# lexer name (PR#709)
+- Fix ``TerminalFormatter`` using bold for bright text (#1480)
+
+
+Version 2.4.0
+-------------
+(released May 8, 2019)
+
+- Added lexers:
+
+ * Augeas (PR#807)
+ * BBC Basic (PR#806)
+ * Boa (PR#756)
+ * Charm++ CI (PR#788)
+ * DASM16 (PR#807)
+ * FloScript (PR#750)
+ * FreeFem++ (PR#785)
+ * Hspec (PR#790)
+ * Pony (PR#627)
+ * SGF (PR#780)
+ * Slash (PR#807)
+ * Slurm (PR#760)
+ * Tera Term Language (PR#749)
+ * TOML (PR#807)
+ * Unicon (PR#731)
+ * VBScript (PR#673)
+
+- Updated lexers:
+
+ * Apache2 (PR#766)
+ * Cypher (PR#746)
+ * LLVM (PR#792)
+ * Makefiles (PR#766)
+ * PHP (#1482)
+ * Rust
+ * SQL (PR#672)
+ * Stan (PR#774)
+ * Stata (PR#800)
+ * Terraform (PR#787)
+ * YAML
+
+- Add solarized style (PR#708)
+- Add support for Markdown reference-style links (PR#753)
+- Add license information to generated HTML/CSS files (#1496)
+- Change ANSI color names (PR#777)
+- Fix catastrophic backtracking in the bash lexer (#1494)
+- Fix documentation failing to build using Sphinx 2.0 (#1501)
+- Fix incorrect links in the Lisp and R lexer documentation (PR#775)
+- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
+- Fix lexers popping from an empty stack (#1506)
+- TypoScript uses ``.typoscript`` now (#1498)
+- Updated Trove classifiers and ``pip`` requirements (PR#799)
+
+
+
+Version 2.3.1
+-------------
+(released Dec 16, 2018)
+
+- Updated lexers:
+
+ * ASM (PR#784)
+ * Chapel (PR#735)
+ * Clean (PR#621)
+ * CSound (PR#684)
+ * Elm (PR#744)
+ * Fortran (PR#747)
+ * GLSL (PR#740)
+ * Haskell (PR#745)
+ * Hy (PR#754)
+ * Igor Pro (PR#764)
+ * PowerShell (PR#705)
+ * Python (PR#720, #1299, PR#715)
+ * SLexer (PR#680)
+ * YAML (PR#762, PR#724)
+
+- Fix invalid string escape sequences
+- Fix `FutureWarning` introduced by regex changes in Python 3.7
+
+
+Version 2.3.0
+-------------
+(released Nov 25, 2018)
+
+- Added lexers:
+
+ * Fennel (PR#783)
+ * HLSL (PR#675)
+
+- Updated lexers:
+
+ * Dockerfile (PR#714)
+
+- Minimum Python versions changed to 2.7 and 3.5
+- Added support for Python 3.7 generator changes (PR#772)
+- Fix incorrect token type in SCSS for single-quote strings (#1322)
+- Use `terminal256` formatter if `TERM` contains `256` (PR#666)
+- Fix incorrect handling of GitHub style fences in Markdown (PR#741, #1389)
+- Fix `%a` not being highlighted in Python3 strings (PR#727)
+
+
+Version 2.2.0
+-------------
+(released Jan 22, 2017)
+
+- Added lexers:
+
+ * AMPL
+ * TypoScript (#1173)
+ * Varnish config (PR#554)
+ * Clean (PR#503)
+ * WDiff (PR#513)
+ * Flatline (PR#551)
+ * Silver (PR#537)
+ * HSAIL (PR#518)
+ * JSGF (PR#546)
+ * NCAR command language (PR#536)
+ * Extempore (PR#530)
+ * Cap'n Proto (PR#595)
+ * Whiley (PR#573)
+ * Monte (PR#592)
+ * Crystal (PR#576)
+ * Snowball (PR#589)
+ * CapDL (PR#579)
+ * NuSMV (PR#564)
+ * SAS, Stata (PR#593)
+
+- Added the ability to load lexer and formatter classes directly from files
+ with the `-x` command line option and the `lexers.load_lexer_from_file()`
+ and `formatters.load_formatter_from_file()` functions. (PR#559)
+
+- Added `lexers.find_lexer_class_by_name()`. (#1203)
+
+- Added new token types and lexing for magic methods and variables in Python
+ and PHP.
+
+- Added a new token type for string affixes and lexing for them in Python, C++
+ and Postgresql lexers.
+
+- Added a new token type for heredoc (and similar) string delimiters and
+ lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers.
+
+- Styles can now define colors with ANSI colors for use in the 256-color
+ terminal formatter. (PR#531)
+
+- Improved the CSS lexer. (#1083, #1130)
+
+- Added "Rainbow Dash" style. (PR#623)
+
+- Delay loading `pkg_resources`, which takes a long while to import. (PR#690)
+
+
+Version 2.1.3
+-------------
+(released Mar 2, 2016)
+
+- Fixed regression in Bash lexer (PR#563)
+
+
+Version 2.1.2
+-------------
+(released Feb 29, 2016)
+
+- Fixed Python 3 regression in image formatter (#1215)
+- Fixed regression in Bash lexer (PR#562)
+
+
+Version 2.1.1
+-------------
+(released Feb 14, 2016)
+
+- Fixed Jython compatibility (#1205)
+- Fixed HTML formatter output with leading empty lines (#1111)
+- Added a mapping table for LaTeX encodings and added utf8 (#1152)
+- Fixed image formatter font searching on Macs (#1188)
+- Fixed deepcopy-ing of Token instances (#1168)
+- Fixed Julia string interpolation (#1170)
+- Fixed statefulness of HttpLexer between get_tokens calls
+- Many smaller fixes to various lexers
+
+
+Version 2.1
+-----------
+(released Jan 17, 2016)
+
+- Added lexers:
+
+ * Emacs Lisp (PR#431)
+ * Arduino (PR#442)
+ * Modula-2 with multi-dialect support (#1090)
+ * Fortran fixed format (PR#213)
+ * Archetype Definition language (PR#483)
+ * Terraform (PR#432)
+ * Jcl, Easytrieve (PR#208)
+ * ParaSail (PR#381)
+ * Boogie (PR#420)
+ * Turtle (PR#425)
+ * Fish Shell (PR#422)
+ * Roboconf (PR#449)
+ * Test Anything Protocol (PR#428)
+ * Shen (PR#385)
+ * Component Pascal (PR#437)
+ * SuperCollider (PR#472)
+ * Shell consoles (Tcsh, PowerShell, MSDOS) (PR#479)
+ * Elm and J (PR#452)
+ * Crmsh (PR#440)
+ * Praat (PR#492)
+ * CSound (PR#494)
+ * Ezhil (PR#443)
+ * Thrift (PR#469)
+ * QVT Operational (PR#204)
+ * Hexdump (PR#508)
+ * CAmkES Configuration (PR#462)
+
+- Added styles:
+
+ * Lovelace (PR#456)
+ * Algol and Algol-nu (#1090)
+
+- Added formatters:
+
+ * IRC (PR#458)
+ * True color (24-bit) terminal ANSI sequences (#1142)
+ (formatter alias: "16m")
+
+- New "filename" option for HTML formatter (PR#527).
+
+- Improved performance of the HTML formatter for long lines (PR#504).
+
+- Updated autopygmentize script (PR#445).
+
+- Fixed style inheritance for non-standard token types in HTML output.
+
+- Added support for async/await to Python 3 lexer.
+
+- Rewrote linenos option for TerminalFormatter (it's better, but slightly
+ different output than before) (#1147).
+
+- Javascript lexer now supports most of ES6 (#1100).
+
+- Cocoa builtins updated for iOS 8.1 (PR#433).
+
+- Combined BashSessionLexer and ShellSessionLexer, new version should support
+ the prompt styles of either.
+
+- Added option to pygmentize to show a full traceback on exceptions.
+
+- Fixed incomplete output on Windows and Python 3 (e.g. when using IPython
+  Notebook) (#1153).
+
+- Allowed more traceback styles in Python console lexer (PR#253).
+
+- Added decorators to TypeScript (PR#509).
+
+- Fix highlighting of certain IRC logs formats (#1076).
+
+
+Version 2.0.2
+-------------
+(released Jan 20, 2015)
+
+- Fix Python tracebacks getting duplicated in the console lexer (#1068).
+
+- Backquote-delimited identifiers are now recognized in F# (#1062).
+
+
+Version 2.0.1
+-------------
+(released Nov 10, 2014)
+
+- Fix an encoding issue when using ``pygmentize`` with the ``-o`` option.
+
+
+Version 2.0
+-----------
+(released Nov 9, 2014)
+
+- Default lexer encoding is now "guess", i.e. UTF-8 / Locale / Latin1 is
+ tried in that order.
+
+- Major update to Swift lexer (PR#410).
+
+- Multiple fixes to lexer guessing in conflicting cases:
+
+ * recognize HTML5 by doctype
+ * recognize XML by XML declaration
+ * don't recognize C/C++ as SystemVerilog
+
+- Simplified regexes and builtin lists.
+
+
+Version 2.0rc1
+--------------
+(released Oct 16, 2014)
+
+- Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source
+ compatibility between Python 2.6, 2.7 and 3.3+.
+
+- New website and documentation based on Sphinx (finally!)
+
+- Lexers added:
+
+ * APL (#969)
+ * Agda and Literate Agda (PR#203)
+ * Alloy (PR#355)
+ * AmbientTalk
+ * BlitzBasic (PR#197)
+ * ChaiScript (PR#24)
+ * Chapel (PR#256)
+ * Cirru (PR#275)
+ * Clay (PR#184)
+ * ColdFusion CFC (PR#283)
+ * Cryptol and Literate Cryptol (PR#344)
+ * Cypher (PR#257)
+ * Docker config files
+ * EBNF (PR#193)
+ * Eiffel (PR#273)
+ * GAP (PR#311)
+ * Golo (PR#309)
+ * Handlebars (PR#186)
+ * Hy (PR#238)
+ * Idris and Literate Idris (PR#210)
+ * Igor Pro (PR#172)
+ * Inform 6/7 (PR#281)
+ * Intel objdump (PR#279)
+ * Isabelle (PR#386)
+ * Jasmin (PR#349)
+ * JSON-LD (PR#289)
+ * Kal (PR#233)
+ * Lean (PR#399)
+ * LSL (PR#296)
+ * Limbo (PR#291)
+ * Liquid (#977)
+ * MQL (PR#285)
+ * MaskJS (PR#280)
+ * Mozilla preprocessors
+ * Mathematica (PR#245)
+ * NesC (PR#166)
+ * Nit (PR#375)
+ * Nix (PR#267)
+ * Pan
+ * Pawn (PR#211)
+ * Perl 6 (PR#181)
+ * Pig (PR#304)
+ * Pike (PR#237)
+ * QBasic (PR#182)
+ * Red (PR#341)
+ * ResourceBundle (#1038)
+ * Rexx (PR#199)
+ * Rql (PR#251)
+ * Rsl
+ * SPARQL (PR#78)
+ * Slim (PR#366)
+ * Swift (PR#371)
+ * Swig (PR#168)
+ * TADS 3 (PR#407)
+ * Todo.txt todo lists
+ * Twig (PR#404)
+
+- Added a helper to "optimize" regular expressions that match one of many
+ literal words; this can save 20% and more lexing time with lexers that
+ highlight many keywords or builtins.
+
+- New styles: "xcode" and "igor", similar to the default highlighting of
+ the respective IDEs.
+
+- The command-line "pygmentize" tool now tries a little harder to find the
+ correct encoding for files and the terminal (#979).
+
+- Added "inencoding" option for lexers to override "encoding" analogous
+ to "outencoding" (#800).
+
+- Added line-by-line "streaming" mode for pygmentize with the "-s" option.
+ (PR#165) Only fully works for lexers that have no constructs spanning
+ lines!
+
+- Added an "envname" option to the LaTeX formatter to select a replacement
+ verbatim environment (PR#235).
+
+- Updated the Makefile lexer to yield a little more useful highlighting.
+
+- Lexer aliases passed to ``get_lexer_by_name()`` are now case-insensitive.
+
+- File name matching in lexers and formatters will now use a regex cache
+ for speed (PR#205).
+
+- Pygments will now recognize "vim" modelines when guessing the lexer for
+ a file based on content (PR#118).
+
+- Major restructure of the ``pygments.lexers`` module namespace. There are now
+ many more modules with less lexers per module. Old modules are still around
+ and re-export the lexers they previously contained.
+
+- The NameHighlightFilter now works with any Name.* token type (#790).
+
+- Python 3 lexer: add new exceptions from PEP 3151.
+
+- Opa lexer: add new keywords (PR#170).
+
+- Julia lexer: add keywords and underscore-separated number
+ literals (PR#176).
+
+- Lasso lexer: fix method highlighting, update builtins. Fix
+ guessing so that plain XML isn't always taken as Lasso (PR#163).
+
+- Objective C/C++ lexers: allow "@" prefixing any expression (#871).
+
+- Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols
+ in hashes (#873).
+
+- Stan lexer: update for version 2.4.0 of the language (PR#162, PR#255, PR#377).
+
+- JavaScript lexer: add the "yield" keyword (PR#196).
+
+- HTTP lexer: support for PATCH method (PR#190).
+
+- Koka lexer: update to newest language spec (PR#201).
+
+- Haxe lexer: rewrite and support for Haxe 3 (PR#174).
+
+- Prolog lexer: add different kinds of numeric literals (#864).
+
+- F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with
+ dotted chains (#948).
+
+- Kotlin lexer: general update (PR#271).
+
+- Rebol lexer: fix comment detection and analyse_text (PR#261).
+
+- LLVM lexer: update keywords to v3.4 (PR#258).
+
+- PHP lexer: add new keywords and binary literals (PR#222).
+
+- external/markdown-processor.py updated to newest python-markdown (PR#221).
+
+- CSS lexer: some highlighting order fixes (PR#231).
+
+- Ceylon lexer: fix parsing of nested multiline comments (#915).
+
+- C family lexers: fix parsing of indented preprocessor directives (#944).
+
+- Rust lexer: update to 0.9 language version (PR#270, PR#388).
+
+- Elixir lexer: update to 0.15 language version (PR#392).
+
+- Fix swallowing incomplete tracebacks in Python console lexer (#874).
+
+
+Version 1.6
+-----------
+(released Feb 3, 2013)
+
+- Lexers added:
+
+ * Dylan console (PR#149)
+ * Logos (PR#150)
+ * Shell sessions (PR#158)
+
+- Fix guessed lexers not receiving lexer options (#838).
+
+- Fix unquoted HTML attribute lexing in Opa (#841).
+
+- Fixes to the Dart lexer (PR#160).
+
+
+Version 1.6rc1
+--------------
+(released Jan 9, 2013)
+
+- Lexers added:
+
+ * AspectJ (PR#90)
+ * AutoIt (PR#122)
+ * BUGS-like languages (PR#89)
+ * Ceylon (PR#86)
+ * Croc (new name for MiniD)
+ * CUDA (PR#75)
+ * Dg (PR#116)
+ * IDL (PR#115)
+ * Jags (PR#89)
+ * Julia (PR#61)
+ * Kconfig (#711)
+ * Lasso (PR#95, PR#113)
+ * LiveScript (PR#84)
+ * Monkey (PR#117)
+ * Mscgen (PR#80)
+ * NSIS scripts (PR#136)
+ * OpenCOBOL (PR#72)
+ * QML (PR#123)
+ * Puppet (PR#133)
+ * Racket (PR#94)
+ * Rdoc (PR#99)
+ * Robot Framework (PR#137)
+ * RPM spec files (PR#124)
+ * Rust (PR#67)
+ * Smali (Dalvik assembly)
+ * SourcePawn (PR#39)
+ * Stan (PR#89)
+ * Treetop (PR#125)
+ * TypeScript (PR#114)
+ * VGL (PR#12)
+ * Visual FoxPro (#762)
+ * Windows Registry (#819)
+ * Xtend (PR#68)
+
+- The HTML formatter now supports linking to tags using CTags files, when the
+ python-ctags package is installed (PR#87).
+
+- The HTML formatter now has a "linespans" option that wraps every line in a
+ <span> tag with a specific id (PR#82).
+
+- When deriving a lexer from another lexer with token definitions, definitions
+ for states not in the child lexer are now inherited. If you override a state
+ in the child lexer, an "inherit" keyword has been added to insert the base
+ state at that position (PR#141).
+
+- The C family lexers now inherit token definitions from a common base class,
+ removing code duplication (PR#141).
+
+- Use "colorama" on Windows for console color output (PR#142).
+
+- Fix Template Haskell highlighting (PR#63).
+
+- Fix some S/R lexer errors (PR#91).
+
+- Fix a bug in the Prolog lexer with names that start with 'is' (#810).
+
+- Rewrite Dylan lexer, add Dylan LID lexer (PR#147).
+
+- Add a Java quickstart document (PR#146).
+
+- Add a "external/autopygmentize" file that can be used as .lessfilter (#802).
+
+
+Version 1.5
+-----------
+(codename Zeitdilatation, released Mar 10, 2012)
+
+- Lexers added:
+
+ * Awk (#630)
+ * Fancy (#633)
+ * PyPy Log
+ * eC
+ * Nimrod
+ * Nemerle (#667)
+ * F# (#353)
+ * Groovy (#501)
+ * PostgreSQL (#660)
+ * DTD
+ * Gosu (#634)
+ * Octave (PR#22)
+ * Standard ML (PR#14)
+ * CFengine3 (#601)
+ * Opa (PR#37)
+ * HTTP sessions (PR#42)
+ * JSON (PR#31)
+ * SNOBOL (PR#30)
+ * MoonScript (PR#43)
+ * ECL (PR#29)
+ * Urbiscript (PR#17)
+ * OpenEdge ABL (PR#27)
+ * SystemVerilog (PR#35)
+ * Coq (#734)
+ * PowerShell (#654)
+ * Dart (#715)
+ * Fantom (PR#36)
+ * Bro (PR#5)
+ * NewLISP (PR#26)
+ * VHDL (PR#45)
+ * Scilab (#740)
+ * Elixir (PR#57)
+ * Tea (PR#56)
+ * Kotlin (PR#58)
+
+- Fix Python 3 terminal highlighting with pygmentize (#691).
+
+- In the LaTeX formatter, escape special &, < and > chars (#648).
+
+- In the LaTeX formatter, fix display problems for styles with token
+ background colors (#670).
+
+- Enhancements to the Squid conf lexer (#664).
+
+- Several fixes to the reStructuredText lexer (#636).
+
+- Recognize methods in the ObjC lexer (#638).
+
+- Fix Lua "class" highlighting: it does not have classes (#665).
+
+- Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, #708).
+
+- Fix number pattern order in Ocaml lexer (#647).
+
+- Fix generic type highlighting in ActionScript 3 (#666).
+
+- Fixes to the Clojure lexer (PR#9).
+
+- Fix degenerate regex in Nemerle lexer (#706).
+
+- Fix infinite looping in CoffeeScript lexer (#729).
+
+- Fix crashes and analysis with ObjectiveC lexer (#693, #696).
+
+- Add some Fortran 2003 keywords.
+
+- Fix Boo string regexes (#679).
+
+- Add "rrt" style (#727).
+
+- Fix infinite looping in Darcs Patch lexer.
+
+- Lots of misc fixes to character-eating bugs and ordering problems in many
+ different lexers.
+
+
+Version 1.4
+-----------
+(codename Unschärfe, released Jan 03, 2011)
+
+- Lexers added:
+
+ * Factor (#520)
+ * PostScript (#486)
+ * Verilog (#491)
+ * BlitzMax Basic (#478)
+ * Ioke (#465)
+ * Java properties, split out of the INI lexer (#445)
+ * Scss (#509)
+ * Duel/JBST
+ * XQuery (#617)
+ * Mason (#615)
+ * GoodData (#609)
+ * SSP (#473)
+ * Autohotkey (#417)
+ * Google Protocol Buffers
+ * Hybris (#506)
+
+- Do not fail in analyse_text methods (#618).
+
+- Performance improvements in the HTML formatter (#523).
+
+- With the ``noclasses`` option in the HTML formatter, some styles
+ present in the stylesheet were not added as inline styles.
+
+- Four fixes to the Lua lexer (#480, #481, #482, #497).
+
+- More context-sensitive Gherkin lexer with support for more i18n translations.
+
+- Support new OO keywords in Matlab lexer (#521).
+
+- Small fix in the CoffeeScript lexer (#519).
+
+- A bugfix for backslashes in ocaml strings (#499).
+
+- Fix unicode/raw docstrings in the Python lexer (#489).
+
+- Allow PIL to work without PIL.pth (#502).
+
+- Allow seconds as a unit in CSS (#496).
+
+- Support ``application/javascript`` as a JavaScript mime type (#504).
+
+- Support `Offload <https://offload.codeplay.com/>`_ C++ Extensions as
+ keywords in the C++ lexer (#484).
+
+- Escape more characters in LaTeX output (#505).
+
+- Update Haml/Sass lexers to version 3 (#509).
+
+- Small PHP lexer string escaping fix (#515).
+
+- Support comments before preprocessor directives, and unsigned/
+ long long literals in C/C++ (#613, #616).
+
+- Support line continuations in the INI lexer (#494).
+
+- Fix lexing of Dylan string and char literals (#628).
+
+- Fix class/procedure name highlighting in VB.NET lexer (#624).
+
+
+Version 1.3.1
+-------------
+(bugfix release, released Mar 05, 2010)
+
+- The ``pygmentize`` script was missing from the distribution.
+
+
+Version 1.3
+-----------
+(codename Schneeglöckchen, released Mar 01, 2010)
+
+- Added the ``ensurenl`` lexer option, which can be used to suppress the
+ automatic addition of a newline to the lexer input.
+
+- Lexers added:
+
+ * Ada
+ * Coldfusion
+ * Modula-2
+ * Haxe
+ * R console
+ * Objective-J
+ * Haml and Sass
+ * CoffeeScript
+
+- Enhanced reStructuredText highlighting.
+
+- Added support for PHP 5.3 namespaces in the PHP lexer.
+
+- Added a bash completion script for `pygmentize`, to the external/
+ directory (#466).
+
+- Fixed a bug in `do_insertions()` used for multi-lexer languages.
+
+- Fixed a Ruby regex highlighting bug (#476).
+
+- Fixed regex highlighting bugs in Perl lexer (#258).
+
+- Add small enhancements to the C lexer (#467) and Bash lexer (#469).
+
+- Small fixes for the Tcl, Debian control file, Nginx config,
+ Smalltalk, Objective-C, Clojure, Lua lexers.
+
+- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
+
+
+Version 1.2.2
+-------------
+(bugfix release, released Jan 02, 2010)
+
+* Removed a backwards incompatibility in the LaTeX formatter that caused
+ Sphinx to produce invalid commands when writing LaTeX output (#463).
+
+* Fixed a forever-backtracking regex in the BashLexer (#462).
+
+
+Version 1.2.1
+-------------
+(bugfix release, released Jan 02, 2010)
+
+* Fixed mishandling of an ellipsis in place of the frames in a Python
+ console traceback, resulting in clobbered output.
+
+
+Version 1.2
+-----------
+(codename Neujahr, released Jan 01, 2010)
+
+- Dropped Python 2.3 compatibility.
+
+- Lexers added:
+
+ * Asymptote
+ * Go
+ * Gherkin (Cucumber)
+ * CMake
+ * Ooc
+ * Coldfusion
+ * Haxe
+ * R console
+
+- Added options for rendering LaTeX in source code comments in the
+ LaTeX formatter (#461).
+
+- Updated the Logtalk lexer.
+
+- Added `line_number_start` option to image formatter (#456).
+
+- Added `hl_lines` and `hl_color` options to image formatter (#457).
+
+- Fixed the HtmlFormatter's handling of noclasses=True to not output any
+ classes (#427).
+
+- Added the Monokai style (#453).
+
+- Fixed LLVM lexer identifier syntax and added new keywords (#442).
+
+- Fixed the PythonTracebackLexer to handle non-traceback data in header or
+ trailer, and support more partial tracebacks that start on line 2 (#437).
+
+- Fixed the CLexer to not highlight ternary statements as labels.
+
+- Fixed lexing of some Ruby quoting peculiarities (#460).
+
+- A few ASM lexer fixes (#450).
+
+
+Version 1.1.1
+-------------
+(bugfix release, released Sep 15, 2009)
+
+- Fixed the BBCode lexer (#435).
+
+- Added support for new Jinja2 keywords.
+
+- Fixed test suite failures.
+
+- Added Gentoo-specific suffixes to Bash lexer.
+
+
+Version 1.1
+-----------
+(codename Brillouin, released Sep 11, 2009)
+
+- Ported Pygments to Python 3. This needed a few changes in the way
+ encodings are handled; they may affect corner cases when used with
+ Python 2 as well.
+
+- Lexers added:
+
+ * Antlr/Ragel, thanks to Ana Nelson
+ * (Ba)sh shell
+ * Erlang shell
+ * GLSL
+ * Prolog
+ * Evoque
+ * Modelica
+ * Rebol
+ * MXML
+ * Cython
+ * ABAP
+ * ASP.net (VB/C#)
+ * Vala
+ * Newspeak
+
+- Fixed the LaTeX formatter's output so that output generated for one style
+ can be used with the style definitions of another (#384).
+
+- Added "anchorlinenos" and "noclobber_cssfile" (#396) options to HTML
+ formatter.
+
+- Support multiline strings in Lua lexer.
+
+- Rewrite of the JavaScript lexer by Pumbaa80 to better support regular
+ expression literals (#403).
+
+- When pygmentize is asked to highlight a file for which multiple lexers
+ match the filename, use the analyse_text guessing engine to determine the
+ winner (#355).
+
+- Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378),
+ the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387)
+ and the AS3 lexer (#389).
+
+- Fixed three Perl heredoc lexing bugs (#379, #400, #422).
+
+- Fixed a bug in the image formatter which misdetected lines (#380).
+
+- Fixed bugs lexing extended Ruby strings and regexes.
+
+- Fixed a bug when lexing git diffs.
+
+- Fixed a bug lexing the empty commit in the PHP lexer (#405).
+
+- Fixed a bug causing Python numbers to be mishighlighted as floats (#397).
+
+- Fixed a bug when backslashes are used in odd locations in Python (#395).
+
+- Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410,
+ #411, #413, #414) and fmarc (#419).
+
+- Fixed a bug in Haskell single-line comment detection (#426).
+
+- Added new-style reStructuredText directive for docutils 0.5+ (#428).
+
+
+Version 1.0
+-----------
+(codename Dreiundzwanzig, released Nov 23, 2008)
+
+- Don't use join(splitlines()) when converting newlines to ``\n``,
+ because that doesn't keep all newlines at the end when the
+ ``stripnl`` lexer option is False.
+
+- Added ``-N`` option to command-line interface to get a lexer name
+ for a given filename.
+
+- Added Tango style, written by Andre Roberge for the Crunchy project.
+
+- Added Python3TracebackLexer and ``python3`` option to
+ PythonConsoleLexer.
+
+- Fixed a few bugs in the Haskell lexer.
+
+- Fixed PythonTracebackLexer to be able to recognize SyntaxError and
+ KeyboardInterrupt (#360).
+
+- Provide one formatter class per image format, so that surprises like::
+
+ pygmentize -f gif -o foo.gif foo.py
+
+ creating a PNG file are avoided.
+
+- Actually use the `font_size` option of the image formatter.
+
+- Fixed the numpy lexer so that it no longer listens for `*.py`.
+
+- Fixed HTML formatter so that text options can be Unicode
+ strings (#371).
+
+- Unified Diff lexer supports the "udiff" alias now.
+
+- Fixed a few issues in Scala lexer (#367).
+
+- RubyConsoleLexer now supports simple prompt mode (#363).
+
+- JavascriptLexer is smarter about what constitutes a regex (#356).
+
+- Add Applescript lexer, thanks to Andreas Amann (#330).
+
+- Make the codetags more strict about matching words (#368).
+
+- NginxConfLexer is a little more accurate on mimetypes and
+ variables (#370).
+
+
+Version 0.11.1
+--------------
+(released Aug 24, 2008)
+
+- Fixed a Jython compatibility issue in pygments.unistring (#358).
+
+
+Version 0.11
+------------
+(codename Straußenei, released Aug 23, 2008)
+
+Many thanks go to Tim Hatch for writing or integrating most of the bug
+fixes and new features.
+
+- Lexers added:
+
+ * Nasm-style assembly language, thanks to delroth
+ * YAML, thanks to Kirill Simonov
+ * ActionScript 3, thanks to Pierre Bourdon
+ * Cheetah/Spitfire templates, thanks to Matt Good
+ * Lighttpd config files
+ * Nginx config files
+ * Gnuplot plotting scripts
+ * Clojure
+ * POV-Ray scene files
+ * Sqlite3 interactive console sessions
+ * Scala source files, thanks to Krzysiek Goj
+
+- Lexers improved:
+
+ * C lexer highlights standard library functions now and supports C99
+ types.
+ * Bash lexer now correctly highlights heredocs without preceding
+ whitespace.
+ * Vim lexer now highlights hex colors properly and knows a couple
+ more keywords.
+ * Irc logs lexer now handles xchat's default time format (#340) and
+ correctly highlights lines ending in ``>``.
+ * Support more delimiters for perl regular expressions (#258).
+ * ObjectiveC lexer now supports 2.0 features.
+
+- Added "Visual Studio" style.
+
+- Updated markdown processor to Markdown 1.7.
+
+- Support roman/sans/mono style defs and use them in the LaTeX
+ formatter.
+
+- The RawTokenFormatter is no longer registered to ``*.raw`` and it's
+ documented that tokenization with this lexer may raise exceptions.
+
+- New option ``hl_lines`` to HTML formatter, to highlight certain
+ lines.
+
+- New option ``prestyles`` to HTML formatter.
+
+- New option *-g* to pygmentize, to allow lexer guessing based on
+ filetext (can be slowish, so file extensions are still checked
+ first).
+
+- ``guess_lexer()`` now makes its decision much faster due to a cache
+ of whether data is xml-like (a check which is used in several
+ versions of ``analyse_text()``. Several lexers also have more
+ accurate ``analyse_text()`` now.
+
+
+Version 0.10
+------------
+(codename Malzeug, released May 06, 2008)
+
+- Lexers added:
+
+ * Io
+ * Smalltalk
+ * Darcs patches
+ * Tcl
+ * Matlab
+ * Matlab sessions
+ * FORTRAN
+ * XSLT
+ * tcsh
+ * NumPy
+ * Python 3
+ * S, S-plus, R statistics languages
+ * Logtalk
+
+- In the LatexFormatter, the *commandprefix* option is now by default
+ 'PY' instead of 'C', since the latter resulted in several collisions
+ with other packages. Also, the special meaning of the *arg*
+ argument to ``get_style_defs()`` was removed.
+
+- Added ImageFormatter, to format code as PNG, JPG, GIF or BMP.
+ (Needs the Python Imaging Library.)
+
+- Support doc comments in the PHP lexer.
+
+- Handle format specifications in the Perl lexer.
+
+- Fix comment handling in the Batch lexer.
+
+- Add more file name extensions for the C++, INI and XML lexers.
+
+- Fixes in the IRC and MuPad lexers.
+
+- Fix function and interface name highlighting in the Java lexer.
+
+- Fix at-rule handling in the CSS lexer.
+
+- Handle KeyboardInterrupts gracefully in pygmentize.
+
+- Added BlackWhiteStyle.
+
+- Bash lexer now correctly highlights math, does not require
+ whitespace after semicolons, and correctly highlights boolean
+ operators.
+
+- Makefile lexer is now capable of handling BSD and GNU make syntax.
+
+
+Version 0.9
+-----------
+(codename Herbstzeitlose, released Oct 14, 2007)
+
+- Lexers added:
+
+ * Erlang
+ * ActionScript
+ * Literate Haskell
+ * Common Lisp
+ * Various assembly languages
+ * Gettext catalogs
+ * Squid configuration
+ * Debian control files
+ * MySQL-style SQL
+ * MOOCode
+
+- Lexers improved:
+
+ * Greatly improved the Haskell and OCaml lexers.
+ * Improved the Bash lexer's handling of nested constructs.
+ * The C# and Java lexers exhibited abysmal performance with some
+ input code; this should now be fixed.
+ * The IRC logs lexer is now able to colorize weechat logs too.
+ * The Lua lexer now recognizes multi-line comments.
+ * Fixed bugs in the D and MiniD lexer.
+
+- The encoding handling of the command line mode (pygmentize) was
+ enhanced. You shouldn't get UnicodeErrors from it anymore if you
+ don't give an encoding option.
+
+- Added a ``-P`` option to the command line mode which can be used to
+ give options whose values contain commas or equals signs.
+
+- Added 256-color terminal formatter.
+
+- Added an experimental SVG formatter.
+
+- Added the ``lineanchors`` option to the HTML formatter, thanks to
+ Ian Charnas for the idea.
+
+- Gave the line numbers table a CSS class in the HTML formatter.
+
+- Added a Vim 7-like style.
+
+
+Version 0.8.1
+-------------
+(released Jun 27, 2007)
+
+- Fixed POD highlighting in the Ruby lexer.
+
+- Fixed Unicode class and namespace name highlighting in the C# lexer.
+
+- Fixed Unicode string prefix highlighting in the Python lexer.
+
+- Fixed a bug in the D and MiniD lexers.
+
+- Fixed the included MoinMoin parser.
+
+
+Version 0.8
+-----------
+(codename Maikäfer, released May 30, 2007)
+
+- Lexers added:
+
+ * Haskell, thanks to Adam Blinkinsop
+ * Redcode, thanks to Adam Blinkinsop
+ * D, thanks to Kirk McDonald
+ * MuPad, thanks to Christopher Creutzig
+ * MiniD, thanks to Jarrett Billingsley
+ * Vim Script, by Tim Hatch
+
+- The HTML formatter now has a second line-numbers mode in which it
+ will just integrate the numbers in the same ``<pre>`` tag as the
+ code.
+
+- The `CSharpLexer` now is Unicode-aware, which means that it has an
+ option that can be set so that it correctly lexes Unicode
+ identifiers allowed by the C# specs.
+
+- Added a `RaiseOnErrorTokenFilter` that raises an exception when the
+ lexer generates an error token, and a `VisibleWhitespaceFilter` that
+ converts whitespace (spaces, tabs, newlines) into visible
+ characters.
+
+- Fixed the `do_insertions()` helper function to yield correct
+ indices.
+
+- The ReST lexer now automatically highlights source code blocks in
+ ".. sourcecode:: language" and ".. code:: language" directive
+ blocks.
+
+- Improved the default style (thanks to Tiberius Teng). The old
+ default is still available as the "emacs" style (which was an alias
+ before).
+
+- The `get_style_defs` method of HTML formatters now uses the
+ `cssclass` option as the default selector if it was given.
+
+- Improved the ReST and Bash lexers a bit.
+
+- Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim
+ Hatch.
+
+- Fixed a bug in the command line code that disallowed ``-O`` options
+ when using the ``-S`` option.
+
+- Fixed a bug in the `RawTokenFormatter`.
+
+
+Version 0.7.1
+-------------
+(released Feb 15, 2007)
+
+- Fixed little highlighting bugs in the Python, Java, Scheme and
+ Apache Config lexers.
+
+- Updated the included manpage.
+
+- Included a built version of the documentation in the source tarball.
+
+
+Version 0.7
+-----------
+(codename Faschingskrapfn, released Feb 14, 2007)
+
+- Added a MoinMoin parser that uses Pygments. With it, you get
+ Pygments highlighting in Moin Wiki pages.
+
+- Changed the exception raised if no suitable lexer, formatter etc. is
+ found in one of the `get_*_by_*` functions to a custom exception,
+ `pygments.util.ClassNotFound`. It is, however, a subclass of
+ `ValueError` in order to retain backwards compatibility.
+
+- Added a `-H` command line option which can be used to get the
+ docstring of a lexer, formatter or filter.
+
+- Made the handling of lexers and formatters more consistent. The
+ aliases and filename patterns of formatters are now attributes on
+ them.
+
+- Added an OCaml lexer, thanks to Adam Blinkinsop.
+
+- Made the HTML formatter more flexible, and easily subclassable in
+ order to make it easy to implement custom wrappers, e.g. alternate
+ line number markup. See the documentation.
+
+- Added an `outencoding` option to all formatters, making it possible
+ to override the `encoding` (which is used by lexers and formatters)
+ when using the command line interface. Also, if using the terminal
+ formatter and the output file is a terminal and has an encoding
+ attribute, use it if no encoding is given.
+
+- Made it possible to just drop style modules into the `styles`
+ subpackage of the Pygments installation.
+
+- Added a "state" keyword argument to the `using` helper.
+
+- Added a `commandprefix` option to the `LatexFormatter` which allows
+ to control how the command names are constructed.
+
+- Added quite a few new lexers, thanks to Tim Hatch:
+
+ * Java Server Pages
+ * Windows batch files
+ * Trac Wiki markup
+ * Python tracebacks
+ * ReStructuredText
+ * Dylan
+ * and the Befunge esoteric programming language (yay!)
+
+- Added Mako lexers by Ben Bangert.
+
+- Added "fruity" style, another dark background originally vim-based
+ theme.
+
+- Added sources.list lexer by Dennis Kaarsemaker.
+
+- Added token stream filters, and a pygmentize option to use them.
+
+- Changed behavior of `in` Operator for tokens.
+
+- Added mimetypes for all lexers.
+
+- Fixed some problems lexing Python strings.
+
+- Fixed tickets: #167, #178, #179, #180, #185, #201.
+
+
+Version 0.6
+-----------
+(codename Zimtstern, released Dec 20, 2006)
+
+- Added option for the HTML formatter to write the CSS to an external
+ file in "full document" mode.
+
+- Added RTF formatter.
+
+- Added Bash and Apache configuration lexers (thanks to Tim Hatch).
+
+- Improved guessing methods for various lexers.
+
+- Added `@media` support to CSS lexer (thanks to Tim Hatch).
+
+- Added a Groff lexer (thanks to Tim Hatch).
+
+- License change to BSD.
+
+- Added lexers for the Myghty template language.
+
+- Added a Scheme lexer (thanks to Marek Kubica).
+
+- Added some functions to iterate over existing lexers, formatters and
+ filters.
+
+- The HtmlFormatter's `get_style_defs()` can now take a list as an
+ argument to generate CSS with multiple prefixes.
+
+- Support for guessing input encoding added.
+
+- Encoding support added: all processing is now done with Unicode
+ strings, input and output are converted from and optionally to byte
+ strings (see the ``encoding`` option of lexers and formatters).
+
+- Some improvements in the C(++) lexers handling comments and line
+ continuations.
+
+
+Version 0.5.1
+-------------
+(released Oct 30, 2006)
+
+- Fixed traceback in ``pygmentize -L`` (thanks to Piotr Ozarowski).
+
+
+Version 0.5
+-----------
+(codename PyKleur, released Oct 30, 2006)
+
+- Initial public release.
diff --git a/Contributing.md b/Contributing.md
new file mode 100644
index 0000000..93da428
--- /dev/null
+++ b/Contributing.md
@@ -0,0 +1,167 @@
+Licensing
+=========
+
+The code is distributed under the BSD 2-clause license. Contributors making pull
+requests must agree that they are able and willing to put their contributions
+under that license.
+
+Goals & non-goals of Pygments
+=============================
+
+Python support
+--------------
+
+Pygments supports all supported Python versions as per the [Python Developer's Guide](https://devguide.python.org/versions/). Additionally, the default Python version of the latest stable version of RHEL, Ubuntu LTS, and Debian are supported, even if they're officially EOL. Supporting other end-of-life versions is a non-goal of Pygments.
+
+Validation
+----------
+
+Pygments does not attempt to validate the input. Accepting code that is not legal for a given language is acceptable if it simplifies the codebase and does not result in surprising behavior. For instance, in C89, accepting `//` based comments would be fine because de-facto all compilers supported it, and having a separate lexer for it would not be worth it.
+
+Contribution checklist
+======================
+
+* Check the documentation for how to write
+ [a new lexer](https://pygments.org/docs/lexerdevelopment/),
+ [a new formatter](https://pygments.org/docs/formatterdevelopment/) or
+ [a new filter](https://pygments.org/docs/filterdevelopment/)
+
+* Make sure to add a test for your new functionality, and where applicable,
+ write documentation.
+
+* When writing rules, try to merge simple rules. For instance, combine:
+
+ ```python
+ _PUNCTUATION = [
+ (r"\(", token.Punctuation),
+ (r"\)", token.Punctuation),
+ (r"\[", token.Punctuation),
+ (r"\]", token.Punctuation),
+ ("{", token.Punctuation),
+ ("}", token.Punctuation),
+ ]
+ ```
+
+ into:
+
+ ```python
+ (r"[\(\)\[\]{}]", token.Punctuation)
+ ```
+
+* Be careful with ``.*``. This matches greedily as much as it can. For instance,
+ a rule like ``@.*@`` will match the whole string ``@first@ second @third@``,
+ instead of matching ``@first@`` and ``@third@``. You can use ``@.*?@`` in
+ this case to stop early. The ``?`` tries to match _as few times_ as possible.
+
+* Beware of so-called "catastrophic backtracking". As a first example, consider
+ the regular expression ``(A+)*B``. This is equivalent to ``A*B`` regarding
+ what it matches, but *non*-matches will take very long. This is because
+ of the way the regular expression engine works. Suppose you feed it 50
+ 'A's, and a 'C' at the end. It first matches the 'A's greedily in ``A+``,
+ but finds that it cannot match the end since 'B' is not the same as 'C'.
+ Then it backtracks, removing one 'A' from the first ``A+`` and trying to
+ match the rest as another ``(A+)*``. This fails again, so it backtracks
+ further left in the input string, etc. In effect, it tries all combinations
+
+ ```
+ (AAAAAAAAAAAAAAAAA)
+ (AAAAAAAAAAAAAAAA)(A)
+ (AAAAAAAAAAAAAAA)(AA)
+ (AAAAAAAAAAAAAAA)(A)(A)
+ (AAAAAAAAAAAAAA)(AAA)
+ (AAAAAAAAAAAAAA)(AA)(A)
+ ...
+ ```
+
+ Thus, the matching has exponential complexity. In a lexer, the
+ effect is that Pygments will seemingly hang when parsing invalid
+ input.
+
+ ```python
+ >>> import re
+ >>> re.match('(A+)*B', 'A'*50 + 'C') # hangs
+ ```
+
+ As a more subtle and real-life example, here is a badly written
+ regular expression to match strings:
+
+ ```python
+ r'"(\\?.)*?"'
+ ```
+
+ If the ending quote is missing, the regular expression engine will
+ find that it cannot match at the end, and try to backtrack with less
+ matches in the ``*?``. When it finds a backslash, as it has already
+ tried the possibility ``\\.``, it tries ``.`` (recognizing it as a
+ simple character without meaning), which leads to the same
+ exponential backtracking problem if there are lots of backslashes in
+ the (invalid) input string. A good way to write this would be
+ ``r'"([^\\]|\\.)*?"'``, where the inner group can only match in one
+ way. Better yet is to use a dedicated state, which not only
+ sidesteps the issue without headaches, but allows you to highlight
+ string escapes.
+
+ ```python
+ 'root': [
+ ...,
+ (r'"', String, 'string'),
+ ...
+ ],
+ 'string': [
+ (r'\\.', String.Escape),
+ (r'"', String, '#pop'),
+ (r'[^\\"]+', String),
+ ]
+ ```
+
+* When writing rules for patterns such as comments or strings, match as many
+ characters as possible in each token. This is an example of what not to
+ do:
+
+ ```python
+ 'comment': [
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'.', Comment.Multiline),
+ ]
+ ```
+
+ This generates one token per character in the comment, which slows
+ down the lexing process, and also makes the raw token output (and in
+ particular the test output) hard to read. Do this instead:
+
+ ```python
+ 'comment': [
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[^*]+', Comment.Multiline),
+ (r'\*', Comment.Multiline),
+ ]
+ ```
+
+* Don't add imports of your lexer anywhere in the codebase. (In case you're
+ curious about ``compiled.py`` -- this file exists for backwards compatibility
+ reasons.)
+
+* Use the standard importing convention: ``from token import Punctuation``
+
+* For test cases that assert on the tokens produced by a lexer, use tools:
+
+ * You can use the ``testcase`` formatter to produce a piece of code that
+ can be pasted into a unittest file:
+ ``python -m pygments -l lua -f testcase <<< "local a = 5"``
+
+ * Most snippets should instead be put as a sample file under
+ ``tests/snippets/<lexer_alias>/*.txt``. These files are automatically
+ picked up as individual tests, asserting that the input produces the
+ expected tokens.
+
+ To add a new test, create a file with just your code snippet under a
+ subdirectory based on your lexer's main alias. Then run
+ ``pytest --update-goldens <filename.txt>`` to auto-populate the currently
+ expected tokens. Check that they look good and check in the file.
+
+ Also run the same command whenever you need to update the test if the
+ actual produced tokens change (assuming the change is expected).
+
+ * Large test files should go in ``tests/examplefiles``. This works
+ similar to ``snippets``, but the token output is stored in a separate
+ file. Output can also be regenerated with ``--update-goldens``.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..446a1a8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2006-2022 by the respective authors (see AUTHORS file).
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..c6a8567
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,5 @@
+include Makefile CHANGES LICENSE AUTHORS
+include external/*
+recursive-include tests *
+recursive-include doc *
+recursive-include scripts *
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e674732
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,68 @@
+#
+# Makefile for Pygments
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# Combines scripts for common tasks.
+#
+# :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+# :license: BSD, see LICENSE for details.
+#
+
+PYTHON ?= python3
+
+export PYTHONPATH = $(shell echo "$$PYTHONPATH"):$(shell python -c 'import os; print ":".join(os.path.abspath(line.strip()) for line in file("PYTHONPATH"))' 2>/dev/null)
+
+.PHONY: all check clean clean-pyc docs mapfiles \
+ pylint reindent test test-coverage \
+ tox-test tox-test-coverage regexlint
+
+all: clean-pyc check test
+
+check:
+ @$(PYTHON) scripts/check_crlf.py pygments build external
+ @$(PYTHON) scripts/detect_missing_analyse_text.py --skip-no-aliases
+ @pyflakes pygments | grep -v 'but unused' || true
+ @$(PYTHON) scripts/check_sources.py -i build -i dist -i pygments/lexers/_mapping.py \
+ -i docs/build -i pygments/formatters/_mapping.py -i pygments/unistring.py \
+ -i tests/support/empty.py
+ @$(PYTHON) scripts/count_token_references.py --minfiles=1 --maxfiles=1 \
+ --minlines=1 --maxlines=3 --subtoken
+
+clean: clean-pyc
+ -rm -rf doc/_build build Pygments.egg-info
+ -rm -f codetags.html
+
+clean-pyc:
+ find . -name '__pycache__' -exec rm -rf {} +
+
+docs:
+ make -C doc html
+
+mapfiles:
+ $(PYTHON) scripts/gen_mapfiles.py
+
+pylint:
+ @pylint --rcfile scripts/pylintrc pygments
+
+reindent:
+ @$(PYTHON) scripts/reindent.py -r -B .
+
+TEST = tests
+
+test:
+ @$(PYTHON) -m pytest $(TEST)
+
+test-coverage:
+ @$(PYTHON) -m pytest --cov --cov-report=html --cov-report=term $(TEST)
+
+tox-test:
+ @tox -- $(TEST)
+
+tox-test-coverage:
+ @tox -- --with-coverage --cover-package=pygments --cover-erase $(TEST)
+
+RLMODULES = pygments.lexers
+
+regexlint:
+ @if [ -z "$(REGEXLINT)" ]; then echo "Please set REGEXLINT=checkout path"; exit 1; fi
+ PYTHONPATH=`pwd`:$(REGEXLINT) $(PYTHON) $(REGEXLINT)/regexlint/cmdline.py $(RLMODULES)
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..204e46b
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,93 @@
+Welcome to Pygments
+===================
+
+This is the source of Pygments. It is a **generic syntax highlighter** written
+in Python that supports over 500 languages and text formats, for use in code
+hosting, forums, wikis or other applications that need to prettify source code.
+
+Installing
+----------
+
+... works as usual, use ``pip install Pygments`` to get published versions,
+or ``python setup.py install`` to install from a checkout.
+
+Documentation
+-------------
+
+... can be found online at https://pygments.org/ or created with Sphinx by ::
+
+ make docs
+
+By default, the documentation does not include the demo page, as it requires
+having Docker installed for building Pyodide. To build the documentation with
+the demo page, use ::
+
+ WEBSITE_BUILD=1 make docs
+
+The initial build might take some time, but subsequent ones should be instant
+because of Docker caching.
+
+To view the generated documentation, serve it using Python's ``http.server``
+module (this step is required for the demo to work) ::
+
+ python3 -m http.server --directory doc/_build/html
+
+
+Development
+-----------
+
+... takes place on `GitHub <https://github.com/pygments/pygments>`_, where the
+Git repository, tickets and pull requests can be viewed.
+
+Continuous testing runs on GitHub workflows:
+
+.. image:: https://github.com/pygments/pygments/workflows/Pygments/badge.svg
+ :target: https://github.com/pygments/pygments/actions?query=workflow%3APygments
+
+Contribution guidelines are found in Contributing.md_.
+
+.. _Contributing.md: https://github.com/pygments/pygments/blob/master/Contributing.md
+
+Security considerations
+-----------------------
+
+Pygments provides no guarantees on execution time, which needs to be taken
+into consideration when using Pygments to process arbitrary user inputs. For
+example, if you have a web service which uses Pygments for highlighting, there
+may be inputs which will cause the Pygments process to run "forever" and/or use
+significant amounts of memory. This can subsequently be used to perform a
+remote denial-of-service attack on the server if the processes are not
+terminated quickly.
+
+Unfortunately, it's practically impossible to harden Pygments itself against
+those issues: Some regular expressions can result in "catastrophic
+backtracking", but other bugs like incorrect matchers can also
+cause similar problems, and there is no way to find them in an automated fashion
+(short of solving the halting problem.) Pygments has extensive unit tests,
+automated randomized testing, and is also tested by `OSS-Fuzz <https://github.com/google/oss-fuzz/tree/master/projects/pygments>`_,
+but we will never be able to eliminate all bugs in this area.
+
+Our recommendations are:
+
+* Ensure that the Pygments process is *terminated* after a reasonably short
+ timeout. In general Pygments should take seconds at most for reasonably-sized
+ input.
+* *Limit* the number of concurrent Pygments processes to avoid oversubscription
+ of resources.
+
+The Pygments authors will treat any bug resulting in long processing times with
+high priority -- it's one of those things that will be fixed in a patch release.
+When reporting a bug where you suspect super-linear execution times, please make
+sure to attach an input to reproduce it.
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*, **Matthäus Chajdas** and **Jean Abou-Samra**.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo <https://www.pocoo.org/>`_ team and **Tim Hatch**.
+
+The code is distributed under the BSD 2-clause license. Contributors making pull
+requests must agree that they are able and willing to put their contributions
+under that license.
diff --git a/description.rst b/description.rst
new file mode 100644
index 0000000..de34cf1
--- /dev/null
+++ b/description.rst
@@ -0,0 +1,18 @@
+Pygments
+~~~~~~~~
+
+Pygments is a syntax highlighting package written in Python.
+
+It is a generic syntax highlighter suitable for use in code hosting, forums,
+wikis or other applications that need to prettify source code. Highlights
+are:
+
+* a wide range of over 500 languages and other text formats is supported
+* special attention is paid to details, increasing quality by a fair amount
+* support for new languages and formats are added easily
+* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
+ formats that PIL supports and ANSI sequences
+* it is usable as a command-line tool and as a library
+
+Copyright 2006-2022 by the Pygments team, see ``AUTHORS``.
+Licensed under the BSD, see ``LICENSE`` for details. \ No newline at end of file
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..a0dcaaa
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,163 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = PYTHONPATH=.. sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean pyodide html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " pyodide to make Pyodide with currently checked out Pygments"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+pyodide:
+ $(if $(test ! -f docker), $(error "Could not find Docker. Please install that before continuing."))
+ # Enable the BuildKit backend to use the --output option.
+ DOCKER_BUILDKIT=1 docker build --file pyodide/Dockerfile --output $(BUILDDIR)/pyodide/pyodide ..
+ @echo
+ @echo "Pyodide build finished. The Pyodide artifacts are in $(BUILDDIR)/pyodide."
+
+html:
+ $(if $(WEBSITE_BUILD), $(MAKE) pyodide)
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(if $(WEBSITE_BUILD), $(MAKE) pyodide)
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pygments.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pygments.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Pygments"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pygments"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_static/demo-worker.js b/doc/_static/demo-worker.js
new file mode 100644
index 0000000..22b8b3d
--- /dev/null
+++ b/doc/_static/demo-worker.js
@@ -0,0 +1,74 @@
+importScripts('/_static/pyodide/pyodide.js');
+
+async function loadPyodideAndPygments() {
+ self.pyodide = await loadPyodide();
+ await self.pyodide.loadPackage(["Pygments"]);
+ const styles = self.pyodide.runPython(`
+ from pygments.formatters.html import HtmlFormatter
+ from pygments.styles import STYLE_MAP
+ {s: HtmlFormatter(style=s).get_style_defs('.demo-highlight') for s in STYLE_MAP}
+ `).toJs();
+ self.postMessage({loaded: {styles}})
+}
+let pyodideReadyPromise = loadPyodideAndPygments();
+
+self.onmessage = async (event) => {
+ // Make sure loading is done.
+ await pyodideReadyPromise;
+ if (event.data.highlight) {
+ self.pyodide.globals.set('code', event.data.highlight.code);
+ self.pyodide.globals.set('lexer_name', event.data.highlight.lexer);
+
+ self.pyodide.runPython(`
+ import pygments.lexers
+
+ lexer = pygments.lexers.get_lexer_by_name(lexer_name)
+ if type(code) == memoryview:
+ code = bytes(code)
+ tokens = lexer.get_tokens(code)
+ `);
+
+ const formatter = event.data.highlight.formatter;
+ if (formatter == 'html') {
+
+ const html = self.pyodide.runPython(`
+ import io
+ from pygments.formatters.html import HtmlFormatter
+
+ fmter = HtmlFormatter(cssclass='demo-highlight')
+ buf = io.StringIO()
+ fmter.format(tokens, buf)
+ buf.getvalue()
+ `);
+ self.postMessage({html});
+ } else if (formatter == 'tokens') {
+ const tokens = self.pyodide.runPython('list(tokens)').toJs();
+ self.postMessage({tokens});
+ } else {
+ console.warn('unknown formatter:', formatter);
+ }
+ } else if (event.data.guess_lexer) {
+ self.pyodide.globals.set('code', event.data.guess_lexer.code);
+ self.pyodide.globals.set('filename', event.data.guess_lexer.filename);
+ const lexer = self.pyodide.runPython(`
+ import sys
+ sys.setrecursionlimit(1000)
+ # TODO: remove after upgrading to Pyodide 0.19
+
+ import pygments.lexers
+ import pygments.util
+
+ if type(code) == memoryview:
+ code = bytes(code)
+
+ if filename:
+ lexer = pygments.lexers.guess_lexer_for_filename(filename, code)
+ else:
+ lexer = pygments.lexers.guess_lexer(code)
+ lexer.aliases[0]
+ `);
+ self.postMessage({lexer});
+ } else {
+ console.warn('unknown command: expected highlight or guess_lexer but received ', event.data);
+ }
+}
diff --git a/doc/_static/demo.css b/doc/_static/demo.css
new file mode 100644
index 0000000..eaa4410
--- /dev/null
+++ b/doc/_static/demo.css
@@ -0,0 +1,89 @@
+#try {
+ background-color: #f6f6f6;
+ border-radius: 0;
+ border: 1px solid #ccc;
+ margin-top: 15px;
+ margin-bottom: 10px;
+ padding: 10px 15px 5px 10px;
+ position: relative;
+}
+
+#try h2 {
+ margin-top: 0;
+}
+
+#try textarea {
+ border: 1px solid #999;
+ padding: 2px;
+ width: 100%;
+ min-height: 150px;
+ resize: vertical;
+}
+
+#hlcode {
+ margin: 10px 0;
+ max-height: 500px;
+ overflow: auto;
+ border: 1px solid #ccc;
+}
+
+#hlcode:empty {
+ display: none;
+}
+
+#hlcode pre {
+ background-color: transparent;
+ border: 0;
+ margin: 0;
+}
+#hlcode table {
+ /* unset negative margin from pygments14.css */
+ margin: unset;
+}
+
+#code-header:not([hidden]) {
+ display: flex;
+ gap: 1em;
+ padding: 0 15px;
+}
+.flex-grow-1 {
+ flex-grow: 1;
+}
+#lexer {
+ margin-right: 0.5em;
+}
+#guessed-lexer:not(:empty):before {
+ content: '(guessed ';
+}
+#guessed-lexer:not(:empty):after {
+ content: ')';
+}
+
+#loading[hidden] {
+ visibility: hidden;
+ display: flex;
+}
+
+#loading {
+ display: flex;
+ align-items: center;
+ gap: 1em;
+}
+
+#format-settings {
+ display: flex;
+ gap: 1em;
+ border-top: 1px solid #ccc;
+ padding-top: 0.5em;
+ margin-top: 0.5em;
+}
+
+.tokens code {
+ /* make whitespace visible */
+ white-space: pre;
+ background: #d9d9d9;
+}
+
+#contrast-warning {
+ color: darkred;
+}
diff --git a/doc/_static/demo.js b/doc/_static/demo.js
new file mode 100644
index 0000000..b193d10
--- /dev/null
+++ b/doc/_static/demo.js
@@ -0,0 +1,200 @@
+const loadingDiv = document.getElementById("loading");
+const langSelect = document.getElementById("lang");
+const styleSelect = document.getElementById("style");
+const formatterSelect = document.getElementById("formatter");
+const outputDiv = document.getElementById("hlcode");
+const codeHeader = document.getElementById("code-header");
+const copyLink = document.getElementById("copylink");
+const style = document.getElementById("css-style");
+const textarea = document.getElementById("code");
+const uriTooLongMsg = document.getElementById('uri-too-long');
+const contrastWarning = document.getElementById('contrast-warning');
+const fileInput = document.getElementById("file");
+const fileInputResetButton = document.getElementById('reset-file');
+
+const qvars = Object.fromEntries(new URLSearchParams(window.location.search));
+if (qvars.lexer) {
+ langSelect.value = qvars.lexer;
+}
+if (qvars.code !== undefined) {
+ textarea.value = qvars.code;
+ loadingDiv.hidden = false;
+}
+if (qvars.style !== undefined) {
+ styleSelect.value = qvars.style;
+ updateContrastWarning();
+}
+if (qvars.formatter !== undefined) {
+ formatterSelect.value = qvars.formatter;
+}
+
+styleSelect.addEventListener('change', () => {
+ if (!styles)
+ // Worker has not loaded yet.
+ return;
+ style.textContent = styles.get(styleSelect.value);
+ updateCopyLink();
+ updateContrastWarning();
+});
+
+function updateContrastWarning() {
+ contrastWarning.hidden = styleSelect.selectedOptions[0].dataset.wcag == 'aa';
+}
+
+function debounce(func, timeout) {
+ let timer;
+ return (...args) => {
+ clearTimeout(timer);
+ timer = setTimeout(() => func.apply(this, args), timeout);
+ };
+}
+
+const highlightShortDebounce = debounce(highlight, 50);
+const highlightLongDebounce = debounce(highlight, 500);
+
+function debouncedUpdate() {
+ if (fileInput.files.length > 0)
+ return;
+
+ if (textarea.value.length < 1000) {
+ highlightShortDebounce();
+ } else {
+ highlightLongDebounce();
+ }
+}
+
+langSelect.addEventListener('change', debouncedUpdate);
+textarea.addEventListener('input', debouncedUpdate);
+formatterSelect.addEventListener('change', debouncedUpdate);
+fileInput.addEventListener('change', () => {
+ fileInputResetButton.hidden = false;
+ highlight();
+});
+fileInputResetButton.hidden = fileInput.files.length == 0;
+fileInputResetButton.addEventListener('click', () => {
+ fileInputResetButton.hidden = true;
+ fileInput.value = '';
+ highlight();
+});
+
+let styles;
+
+const highlightWorker = new Worker("/_static/demo-worker.js");
+highlightWorker.onmessage = (msg) => {
+ if (msg.data.loaded) {
+ styles = msg.data.loaded.styles;
+
+ if (qvars.code !== undefined || textarea.value) {
+ loadingDiv.hidden = true;
+ highlight();
+ }
+ } else if (msg.data.html) {
+ outputDiv.innerHTML = msg.data.html;
+ codeHeader.hidden = false;
+ loadingDiv.hidden = true;
+ style.textContent = styles.get(styleSelect.value);
+ } else if (msg.data.tokens) {
+ const table = document.createElement('table');
+ table.className = 'tokens';
+ for (const [tokenType, value] of msg.data.tokens) {
+ const tr = document.createElement('tr');
+ const td1 = document.createElement('td');
+ td1.textContent = tokenType.join('.');
+ const td2 = document.createElement('td');
+ const inlineCode = document.createElement('code');
+ inlineCode.textContent = value;
+ td2.appendChild(inlineCode);
+ tr.appendChild(td1);
+ tr.appendChild(td2);
+ table.appendChild(tr);
+ }
+ outputDiv.innerHTML = '';
+ outputDiv.appendChild(table);
+
+ codeHeader.hidden = false;
+ loadingDiv.hidden = true;
+ } else if (msg.data.lexer) {
+ highlight(msg.data.lexer);
+ } else {
+ console.warn('unexpected message from highlight worker', msg);
+ }
+};
+
+function updateCopyLink() {
+ var url = document.location.origin + document.location.pathname +
+ "?" + new URLSearchParams({
+ lexer: langSelect.value,
+ style: styleSelect.value,
+ formatter: formatterSelect.value,
+ code: textarea.value,
+ }).toString()
+ if (url.length > 8201) {
+ // pygments.org is hosted on GitHub pages which does not support URIs longer than 8201
+ copyLink.hidden = true;
+ uriTooLongMsg.hidden = false;
+ } else {
+ copyLink.href = url;
+ copyLink.textContent = 'Copy link';
+ copyLink.hidden = false;
+ uriTooLongMsg.hidden = true;
+ }
+}
+
+async function highlight(guessedLexer) {
+ var lexer = langSelect.value || guessedLexer;
+ var file = fileInput.files[0];
+
+ let code;
+ if (file) {
+ code = await file.arrayBuffer();
+ } else {
+ code = textarea.value;
+ }
+
+ loadingDiv.hidden = false;
+
+ if (!lexer) {
+ const guess_lexer = {code};
+ if (file)
+ guess_lexer.filename = file.name;
+ highlightWorker.postMessage({guess_lexer});
+ document.getElementById('loading-text').textContent = 'guessing lexer...';
+ return;
+ }
+
+ document.getElementById('loading-text').textContent = 'highlighting code...';
+
+ document.getElementById('guessed-lexer').textContent = guessedLexer;
+
+ highlightWorker.postMessage({highlight: {code, lexer, formatter: formatterSelect.value}});
+
+ if (code instanceof ArrayBuffer) {
+ copyLink.hidden = true;
+ uriTooLongMsg.hidden = true;
+ } else {
+ updateCopyLink();
+ }
+}
+
+copyLink.addEventListener('click', async (e) => {
+ e.preventDefault();
+ await navigator.clipboard.writeText(e.target.href);
+});
+
+function download_code() {
+ var filename = "highlighted.html";
+ var hlcode = document.getElementById("hlcode").innerHTML + style.outerHTML;
+ var blob = new Blob([hlcode], {type: 'text/html'});
+ if (window.navigator.msSaveOrOpenBlob) {
+ window.navigator.msSaveBlob(blob, filename);
+ }
+ else{
+ var elem = window.document.createElement('a');
+ elem.href = window.URL.createObjectURL(blob);
+ elem.download = filename;
+ document.body.appendChild(elem);
+ elem.click();
+ document.body.removeChild(elem);
+ window.URL.revokeObjectURL(elem.href);
+ }
+}
diff --git a/doc/_static/favicon.ico b/doc/_static/favicon.ico
new file mode 100644
index 0000000..777f617
--- /dev/null
+++ b/doc/_static/favicon.ico
Binary files differ
diff --git a/doc/_static/github.png b/doc/_static/github.png
new file mode 100644
index 0000000..5d146ad
--- /dev/null
+++ b/doc/_static/github.png
Binary files differ
diff --git a/doc/_static/logo_new.png b/doc/_static/logo_new.png
new file mode 100644
index 0000000..0ae4b20
--- /dev/null
+++ b/doc/_static/logo_new.png
Binary files differ
diff --git a/doc/_static/logo_only.png b/doc/_static/logo_only.png
new file mode 100644
index 0000000..fdebcc4
--- /dev/null
+++ b/doc/_static/logo_only.png
Binary files differ
diff --git a/doc/_static/spinner.gif b/doc/_static/spinner.gif
new file mode 100644
index 0000000..2212db9
--- /dev/null
+++ b/doc/_static/spinner.gif
Binary files differ
diff --git a/doc/_templates/demo.html b/doc/_templates/demo.html
new file mode 100644
index 0000000..8e2a7c6
--- /dev/null
+++ b/doc/_templates/demo.html
@@ -0,0 +1,97 @@
+{% extends "layout.html" %}
+{% set sidebars = sidebars + ["demo_sidebar.html"] %}
+
+{% block extrahead %}
+{{ super() }}
+<link rel="stylesheet" type="text/css" href="{{ pathto("_static/demo.css", 1) }}">
+{% endblock %}
+
+{% block htmltitle %}<title>Demo{{ titlesuffix }}</title>{% endblock %}
+
+{% block body %}
+{{ body }}
+
+<h1>Try out Pygments!</h1>
+
+<noscript>
+ <h2>This website requires JavaScript (and WebAssembly)</h2>
+
+ You can also try out pygments locally by running <code>pip install pygments</code>.
+ Then you can use <a href="{{pathto('docs/cmdline')}}">the command-line interface</a>.
+</noscript>
+
+<div id="try">
+ <p>
+ <label>Language
+ <select id="lang" autofocus>
+ <option value="">guess the language</option>
+ {% for name, info, _, _ in lexers %}
+ <option value="{{info.0}}">{{name}}</option>
+ {% endfor %}
+ </select>
+ </label>
+ <span id=guessed-lexer></span>
+ </p>
+ <p>
+ <label>
+ Enter some code:
+ <textarea id="code" rows="1" cols="60" spellcheck="false"></textarea>
+ </label>
+ </p>
+ <p>
+ <label>
+ Alternatively you can upload a file:
+ <input type="file" id="file">
+ </label>
+ <button id="reset-file">Reset</button>
+ </p>
+ <div id="format-settings">
+ <label>
+ Formatter
+ <select id=formatter>
+ <option value=html>HTML</option>
+ <option value=tokens>tokens</option>
+ </select>
+ </label>
+ <label>Style
+ <select id="style">
+ <optgroup label="Good contrast">
+ {% for style in styles_aa %}
+ <option data-wcag=aa>{{style.name}}</option>
+ {% endfor %}
+ </optgroup>
+ <optgroup label="Suboptimal contrast">
+ {% for style in styles_sub_aa %}
+ <option>{{style.name}}</option>
+ {% endfor %}
+ </optgroup>
+ </select>
+ </label>
+ <span id=contrast-warning hidden>style may have poor contrast</span>
+ </div>
+
+</div>
+
+<div id="loading" hidden>
+ <img src="{{ pathto("_static/spinner.gif", 1) }}" width="20">
+ <span id="loading-text">loading Python...</span>
+</div>
+
+<style id=css-style></style>
+
+<div id="hlcode"></div>
+
+<div id="code-header" hidden>
+ <div class=flex-grow-1></div>
+ <button onclick="download_code()">Download</button>
+ <a id="copylink" role="button">Copy link</a>
+ <span hidden id="uri-too-long">(Copy link unavailable because code too long)</span>
+</div>
+
+<p>The highlighting here is performed in-browser using
+ a WebAssembly translation of the latest Pygments master branch, courtesy of
+  <a href="https://github.com/pyodide/pyodide">Pyodide</a>.</p>
+<p>Your content is neither sent over the web nor stored anywhere.</p>
+
+<script type="text/javascript" src="{{ pathto("_static/demo.js", 1) }}"></script>
+{% endblock %}
diff --git a/doc/_templates/demo_sidebar.html b/doc/_templates/demo_sidebar.html
new file mode 100644
index 0000000..3f2a86c
--- /dev/null
+++ b/doc/_templates/demo_sidebar.html
@@ -0,0 +1 @@
+<p><a href="#try">Back to top</a></p>
diff --git a/doc/_templates/docssidebar.html b/doc/_templates/docssidebar.html
new file mode 100644
index 0000000..913acaa
--- /dev/null
+++ b/doc/_templates/docssidebar.html
@@ -0,0 +1,3 @@
+{% if pagename != 'docs/index' %}
+<strong>&laquo; <a href="{{ pathto('docs/index') }}">Back to docs index</a></strong>
+{% endif %}
diff --git a/doc/_templates/index_with_try.html b/doc/_templates/index_with_try.html
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/doc/_templates/index_with_try.html
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
new file mode 100644
index 0000000..5aa5019
--- /dev/null
+++ b/doc/_templates/indexsidebar.html
@@ -0,0 +1,18 @@
+<section>
+<h3>Download</h3>
+<p>Current version: <b>{{ version }}</b><br><a href="{{ pathto('docs/changelog') }}">Changelog</a></p>
+<p>Get Pygments from the <a href="https://pypi.python.org/pypi/Pygments">Python Package
+ Index</a>, or install it with:</p>
+<pre>pip install Pygments</pre>
+</section>
+<section>
+<h3>Questions? Suggestions?</h3>
+
+<p><img src="{{ pathto("_static/github.png", 1) }}" width="24" />
+ Clone at <a href="https://github.com/pygments/pygments">GitHub</a>.</p>
+<p>You can also open an issue at the
+ <a href="https://github.com/pygments/pygments/issues">tracker</a>.</p>
+</section>
+
+<p class="logo">A <a href="https://www.pocoo.org/">
+  <img src="{{ pathto("_static/pocoo.png", 1) }}" /></a> project</p>
diff --git a/doc/_templates/styles.html b/doc/_templates/styles.html
new file mode 100644
index 0000000..137fa24
--- /dev/null
+++ b/doc/_templates/styles.html
@@ -0,0 +1,55 @@
+{% extends "layout.html" %}
+
+{% block htmltitle %}<title>Styles{{ titlesuffix }}</title>{% endblock %}
+
+{% block body %}
+<style>
+.style-gallery {
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-around;
+}
+h2 {
+ margin-top: 2em;
+}
+.style-gallery h3 {
+ margin-bottom: 0.1em;
+}
+.style-gallery pre {
+ background-color: inherit;
+}
+</style>
+{{ body }}
+
+<h1>Styles</h1>
+
+<p>Pygments comes with the following builtin styles.
+For more information about styles refer to <a href="{{ pathto('docs/styles') }}">the documentation</a>.
+</p>
+
+<div class=style-gallery>
+{% for style in styles_aa %}
+ <div>
+ <h3 id="{{style.name}}">{{style.name}}</h3>
+ {{style.html|safe}}
+ </div>
+{% endfor %}
+</div>
+
+<h2>Styles with a lower contrast</h2>
+<p>
+The following styles do not meet the <a href="https://www.w3.org/WAI/WCAG21/Understanding/contrast-minimum.html">WCAG 2.1 AA contrast minimum</a>,
+so they might be difficult to read for people with suboptimal vision.
+If you want your highlighted code to be well readable for other people, you
+should use one of the earlier styles instead.
+</p>
+<div class=style-gallery>
+ {% for style in styles_sub_aa %}
+ <div>
+ <h3 id="{{style.name}}">{{style.name}}</h3>
+ {{style.html|safe}}
+ </div>
+ {% endfor %}
+</div>
+
+{% endblock %}
diff --git a/doc/_themes/pygments14/layout.html b/doc/_themes/pygments14/layout.html
new file mode 100644
index 0000000..34e86ef
--- /dev/null
+++ b/doc/_themes/pygments14/layout.html
@@ -0,0 +1,101 @@
+{#
+ sphinxdoc/layout.html
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx layout template for the sphinxdoc theme.
+
+ :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{% block relbar1 %}{% endblock %}
+{% block relbar2 %}{% endblock %}
+
+{% block extrahead %}
+ <link href='https://fonts.googleapis.com/css?family={{ theme_font|replace(' ', '+') }}:300,400,700'
+ rel='stylesheet' type='text/css'>
+{{ super() }}
+{%- if not embedded %}
+ <style type="text/css">
+ table.right { float: right; margin-left: 20px; }
+ table.right td { border: 1px solid #ccc; }
+ {% if pagename == 'index' %}
+ .related { display: none; }
+ {% endif %}
+ </style>
+ <script type="text/javascript">
+ // intelligent scrolling of the sidebar content
+ $(window).scroll(function() {
+ var sb = $('.sphinxsidebarwrapper');
+ var win = $(window);
+ var sbh = sb.height();
+ var offset = $('.sphinxsidebar').position()['top'];
+ var wintop = win.scrollTop();
+ var winbot = wintop + win.innerHeight();
+ var curtop = sb.position()['top'];
+ var curbot = curtop + sbh;
+ // does sidebar fit in window?
+ if (sbh < win.innerHeight()) {
+ // yes: easy case -- always keep at the top
+ sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
+ $(document).height() - sbh - 200]));
+ } else {
+ // no: only scroll if top/bottom edge of sidebar is at
+ // top/bottom edge of window
+ if (curtop > wintop && curbot > winbot) {
+ sb.css('top', $u.max([wintop - offset - 10, 0]));
+ } else if (curtop < wintop && curbot < winbot) {
+ sb.css('top', $u.min([winbot - sbh - offset - 20,
+ $(document).height() - sbh - 200]));
+ }
+ }
+ });
+ </script>
+{%- endif %}
+{% endblock %}
+
+{% block header %}
+<div class="outerwrapper">
+<div class="pageheader">
+ <ul>
+ <li><a href="{{ pathto('index') }}">Home</a></li>
+ {% if demo_active %}
+ <li><a href="{{ pathto('demo') }}">Demo</a></li>
+ {% endif %}
+ <li><a href="{{ pathto('languages') }}">Languages</a></li>
+ <li><a href="{{ pathto('styles') }}">Styles</a></li>
+ <li><a href="{{ pathto('faq') }}">FAQ</a></li>
+ <li><a href="{{ pathto('download') }}">Get it</a></li>
+ <li><a href="{{ pathto('docs/index') }}">Docs</a></li>
+ </ul>
+ <div>
+ <a href="{{ pathto('index') }}">
+ <img src="{{ pathto('_static/logo.png', 1) }}" alt="Pygments logo" />
+ </a>
+ </div>
+</div>
+<div class="flexwrapper">
+{% endblock %}
+
+{% block footer %}
+ </div> {# closes "flexwrapper" div #}
+ <div class="footer" role="contentinfo">
+ &copy; Copyright 2006-2022, Georg Brandl and Pygments contributors.
+ Created using <a href="https://sphinx-doc.org/">Sphinx</a> {{
+ sphinx_version }}. <br/>
+ Pygments logo created by <a href="https://joelunger.com">Joel Unger</a>.
+ Backgrounds from <a href="https://subtlepatterns.com">subtlepatterns.com</a>.
+ </div>
+ </div> {# closes "outerwrapper" div #}
+{% endblock %}
+
+{% block sidebarrel %}
+{% endblock %}
+
+{% block sidebarsourcelink %}
+{% endblock %}
diff --git a/doc/_themes/pygments14/localtoc.html b/doc/_themes/pygments14/localtoc.html
new file mode 100644
index 0000000..c0e2de0
--- /dev/null
+++ b/doc/_themes/pygments14/localtoc.html
@@ -0,0 +1,17 @@
+{#
+ basic/localtoc.html
+ ~~~~~~~~~~~~~~~~~~~
+
+ Sphinx sidebar template: local table of contents.
+
+ This file can be removed once https://github.com/sphinx-doc/sphinx/pull/9815 has landed.
+
+ :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- if display_toc %}
+ <div>
+ <h3><a href="{{ pathto(root_doc)|e }}">{{ _('Table of Contents') }}</a></h3>
+ {{ toc }}
+ </div>
+{%- endif %}
diff --git a/doc/_themes/pygments14/relations.html b/doc/_themes/pygments14/relations.html
new file mode 100644
index 0000000..372894d
--- /dev/null
+++ b/doc/_themes/pygments14/relations.html
@@ -0,0 +1,25 @@
+{#
+ basic/relations.html
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx sidebar template: relation links.
+
+ This file can be removed once https://github.com/sphinx-doc/sphinx/pull/9815 has landed.
+
+ :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- if prev %}
+<div>
+ <h4>{{ _('Previous topic') }}</h4>
+ <p class="topless"><a href="{{ prev.link|e }}"
+ title="{{ _('previous chapter') }}">{{ prev.title }}</a></p>
+</div>
+{%- endif %}
+{%- if next %}
+<div>
+ <h4>{{ _('Next topic') }}</h4>
+ <p class="topless"><a href="{{ next.link|e }}"
+ title="{{ _('next chapter') }}">{{ next.title }}</a></p>
+</div>
+{%- endif %}
diff --git a/doc/_themes/pygments14/static/bodybg.png b/doc/_themes/pygments14/static/bodybg.png
new file mode 100644
index 0000000..46892b8
--- /dev/null
+++ b/doc/_themes/pygments14/static/bodybg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/docbg.png b/doc/_themes/pygments14/static/docbg.png
new file mode 100644
index 0000000..13e61f3
--- /dev/null
+++ b/doc/_themes/pygments14/static/docbg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/listitem.png b/doc/_themes/pygments14/static/listitem.png
new file mode 100644
index 0000000..e45715f
--- /dev/null
+++ b/doc/_themes/pygments14/static/listitem.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/logo.png b/doc/_themes/pygments14/static/logo.png
new file mode 100644
index 0000000..2c1a24d
--- /dev/null
+++ b/doc/_themes/pygments14/static/logo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pocoo.png b/doc/_themes/pygments14/static/pocoo.png
new file mode 100644
index 0000000..4174149
--- /dev/null
+++ b/doc/_themes/pygments14/static/pocoo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pygments14.css_t b/doc/_themes/pygments14/static/pygments14.css_t
new file mode 100644
index 0000000..4355074
--- /dev/null
+++ b/doc/_themes/pygments14/static/pygments14.css_t
@@ -0,0 +1,422 @@
+/*
+ * pygments14.css
+ * ~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13.
+ *
+ * :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ text-align: center;
+ background-image: url(bodybg.png);
+ background-color: {{ theme_background }};
+ color: black;
+ padding: 0;
+ /*
+ border-right: 1px solid {{ theme_border }};
+ border-left: 1px solid {{ theme_border }};
+ */
+
+ margin: 0 auto;
+ max-width: 1080px;
+}
+
+.outerwrapper {
+ background-image: url(docbg.png);
+ background-attachment: fixed;
+}
+
+.pageheader {
+ text-align: left;
+ padding: 10px 15px;
+}
+
+.pageheader ul {
+ float: right;
+ color: white;
+ list-style-type: none;
+ padding-left: 0;
+ margin-top: 40px;
+ margin-right: 10px;
+}
+
+.pageheader li {
+ float: left;
+ margin: 0 0 0 10px;
+}
+
+.pageheader li a {
+ border-radius: 3px;
+ padding: 8px 12px;
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 5px rgba(0, 0, 0, 0.2);
+}
+
+.pageheader li a:hover {
+ background-color: {{ theme_yellow }};
+ color: black;
+ text-shadow: none;
+}
+
+div.document {
+ width: 700px;
+ flex-grow: 100;
+ text-align: left;
+ /*border-left: 1em solid {{ theme_lightyellow }};*/
+ min-width: 500px;
+}
+
+@media screen and (max-width: 550px) {
+ div.document {
+ min-width: inherit;
+ }
+}
+
+div.bodywrapper {
+ background-color: white;
+/* border-right: 1px solid {{ theme_border }}; */
+}
+
+.flexwrapper {
+ display: flex;
+ gap: 15px;
+ flex-wrap: wrap;
+ padding-right: 12px;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+ width: 100%;
+ box-sizing: border-box;
+}
+
+div.related {
+ font-size: 1em;
+ color: {{ theme_darkgray }};
+}
+
+div.related ul {
+ background-image: url(relbg.png);
+ background-repeat: repeat-y;
+ background-color: {{ theme_yellow }};
+ height: 1.9em;
+ /*
+ border-top: 1px solid {{ theme_border }};
+ border-bottom: 1px solid {{ theme_border }};
+ */
+}
+
+div.related ul li {
+ margin: 0 5px 0 0;
+ padding: 0;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: {{ theme_darkgray }};
+ /*text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);*/
+}
+
+div.related ul li a:hover {
+ text-decoration: underline;
+ text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0 0px 15px 15px;
+ width: 210px;
+ float: none;
+ font-size: 1em;
+ text-align: left;
+ flex-grow: 1;
+}
+
+.sphinxsidebarwrapper > * {
+ flex: 1 1 0px;
+ min-width: 200px;
+}
+
+div.sphinxsidebar .logo {
+ font-size: 1.8em;
+ color: #666;
+ font-weight: 300;
+ text-align: center;
+}
+
+div.sphinxsidebar .logo img {
+ vertical-align: middle;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #aaa;
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar h3 {
+ font-size: 1.5em;
+ /* border-top: 1px solid {{ theme_border }}; */
+ margin-top: 0;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 1.2em;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-left: -15px;
+ padding-right: 14px;
+ padding-left: 14px;
+ color: #333;
+ font-weight: 300;
+ /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
+}
+
+div.sphinxsidebarwrapper {
+ padding: 0;
+ display: flex;
+ flex-wrap: wrap;
+ gap: 15px;
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.5em;
+ border: none;
+}
+
+div.sphinxsidebar h3 a {
+ color: #333;
+}
+
+div.sphinxsidebar ul {
+ color: #444;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+ list-style-image: url(listitem.png);
+}
+
+div.footer {
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
+ padding: 2em;
+ text-align: center;
+ clear: both;
+ font-size: 0.8em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: {{ theme_darkgreen }};
+ text-decoration: none;
+}
+
+a:hover {
+ color: {{ theme_darkyellow }};
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 10px 0 0 0;
+ font-size: 2.4em;
+ color: {{ theme_darkgray }};
+ font-weight: 300;
+}
+
+h2 {
+    margin: 1em 0 0.2em 0;
+ font-size: 1.5em;
+ font-weight: 300;
+ padding: 0;
+ color: {{ theme_darkgreen }};
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.3em;
+ font-weight: 300;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ text-decoration: none;
+}
+
+div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
+ color: {{ theme_darkgreen }} !important;
+ font-size: inherit !important;
+}
+
+a.headerlink {
+ color: {{ theme_green }} !important;
+ font-size: 12px;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none !important;
+ float: right;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 14px;
+ letter-spacing: -0.02em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border: 1px solid #ddd;
+ border-radius: 2px;
+ color: #333;
+ padding: 1px;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: {{ theme_darkgreen }};
+}
+
+a tt:hover {
+ color: {{ theme_darkyellow }};
+}
+
+pre {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 13px;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ border-radius: 2px;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 0px 7px;
+ border: 1px solid #ccc;
+ margin-left: 1em;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ border-radius: 2px;
+ background-color: #f7f7f7;
+ padding: 0;
+ padding-bottom: 0.5rem;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ font-weight: bold;
+}
+
+div.warning {
+ border: 1px solid #940000;
+/* background-color: #FFCCCF;*/
+}
+
+div.warning p.admonition-title {
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+.viewcode-back {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
diff --git a/doc/_themes/pygments14/theme.conf b/doc/_themes/pygments14/theme.conf
new file mode 100644
index 0000000..8d2988f
--- /dev/null
+++ b/doc/_themes/pygments14/theme.conf
@@ -0,0 +1,17 @@
+[theme]
+inherit = basic
+stylesheet = pygments14.css
+pygments_style = friendly
+
+[options]
+body_min_width = inherit
+body_max_width = inherit
+green = #66b55e
+darkgreen = #36852e
+darkgray = #666666
+border = #66b55e
+yellow = #f4cd00
+darkyellow = #d4ad00
+lightyellow = #fffbe3
+background = #f9f9f9
+font = PT Sans
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..f42c355
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,291 @@
+#
+# Pygments documentation build configuration file
+#
+
+import re, sys, os, itertools
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+import pygments
+import pygments.formatters
+import pygments.lexers
+import pygments.styles
+import tests.contrast.test_contrasts as test_contrasts
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'Pygments'
+copyright = '2006-2022, Georg Brandl and Pygments contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = pygments.__version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+#pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'pygments14'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['_themes']
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+html_title = 'Pygments'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = '_static/favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+html_additional_pages = {
+ 'styles': 'styles.html',
+ }
+
+if os.environ.get('WEBSITE_BUILD'):
+ html_additional_pages['demo'] = 'demo.html'
+ html_static_path.append('_build/pyodide')
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Pygments'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('docs/index', 'Pygments.tex', 'Pygments Documentation',
+ 'Pygments authors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('docs/index', 'pygments', 'Pygments Documentation',
+ ['Pygments authors'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
+
+rst_prolog = '.. |language_count| replace:: {}'.format(len(list(pygments.lexers.get_all_lexers())))
+
+def pg_context(app, pagename, templatename, ctx, event_arg):
+ ctx['demo_active'] = bool(os.environ.get('WEBSITE_BUILD'))
+
+ if pagename == 'demo':
+ ctx['lexers'] = sorted(pygments.lexers.get_all_lexers(plugins=False), key=lambda x: x[0].lower())
+
+ if pagename in ('styles', 'demo'):
+ with open('examples/example.py') as f:
+ html = f.read()
+ lexer = pygments.lexers.get_lexer_for_filename('example.py')
+ min_contrasts = test_contrasts.min_contrasts()
+ ctx['styles_aa'] = []
+ ctx['styles_sub_aa'] = []
+ # Use STYLE_MAP directly so we don't get plugins as with get_all_styles().
+ for style in pygments.styles.STYLE_MAP:
+ if not pygments.styles.get_style_by_name(style).web_style_gallery_exclude:
+ aa = min_contrasts[style] >= test_contrasts.WCAG_AA_CONTRAST
+ bg_r, bg_g, bg_b = test_contrasts.hex2rgb(pygments.styles.get_style_by_name(style).background_color)
+ ctx['styles_aa' if aa else 'styles_sub_aa'].append(
+ dict(
+ name=style,
+ html=pygments.highlight(
+ html,
+ lexer,
+ pygments.formatters.HtmlFormatter(noclasses=True, style=style),
+ ),
+ # from https://en.wikipedia.org/wiki/Relative_luminance
+ bg_luminance=(0.2126*bg_r + 0.7152*bg_g + 0.0722*bg_b)
+ )
+ )
+
+ # sort styles according to their background luminance (light styles first)
+ # if styles have the same background luminance sort them by their name
+ sortkey = lambda s: (-s['bg_luminance'], s['name'])
+ # the default style is always displayed first
+ default_style = ctx['styles_aa'].pop(0)
+ ctx['styles_aa'].sort(key=sortkey)
+ ctx['styles_aa'].insert(0, default_style)
+ ctx['styles_sub_aa'].sort(key=sortkey)
+
+
+def source_read(app, docname, source):
+ # linkify issue / PR numbers in changelog
+ if docname == 'docs/changelog':
+ with open('../CHANGES') as f:
+ changelog = f.read()
+
+ idx = changelog.find('\nVersion 2.4.2\n')
+
+ def linkify(match):
+ url = 'https://github.com/pygments/pygments/issues/' + match[1]
+ return '`{} <{}>`_'.format(match[0], url)
+
+ linkified = re.sub(r'(?:PR)?#([0-9]+)\b', linkify, changelog[:idx])
+ source[0] = linkified + changelog[idx:]
+
+
+def setup(app):
+ app.connect('html-page-context', pg_context)
+ app.connect('source-read', source_read)
diff --git a/doc/docs/api.rst b/doc/docs/api.rst
new file mode 100644
index 0000000..4d330bf
--- /dev/null
+++ b/doc/docs/api.rst
@@ -0,0 +1,360 @@
+.. -*- mode: rst -*-
+
+=====================
+The full Pygments API
+=====================
+
+This page describes the Pygments API.
+
+High-level API
+==============
+
+.. module:: pygments
+
+Functions from the :mod:`pygments` module:
+
+.. function:: lex(code, lexer)
+
+ Lex `code` with the `lexer` (must be a `Lexer` instance)
+ and return an iterable of tokens. Currently, this only calls
+ `lexer.get_tokens()`.
+
+.. function:: format(tokens, formatter, outfile=None)
+
+ Format a token stream (iterable of tokens) `tokens` with the
+ `formatter` (must be a `Formatter` instance). The result is
+ written to `outfile`, or if that is ``None``, returned as a
+ string.
+
+.. function:: highlight(code, lexer, formatter, outfile=None)
+
+ This is the most high-level highlighting function.
+ It combines `lex` and `format` in one function.
+
+
+.. module:: pygments.lexers
+
+Functions from :mod:`pygments.lexers`:
+
+.. function:: get_lexer_by_name(alias, **options)
+
+ Return an instance of a `Lexer` subclass that has `alias` in its
+ aliases list. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
+
+.. function:: get_lexer_for_filename(fn, **options)
+
+ Return a `Lexer` subclass instance that has a filename pattern
+ matching `fn`. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
+ is found.
+
+.. function:: get_lexer_for_mimetype(mime, **options)
+
+ Return a `Lexer` subclass instance that has `mime` in its mimetype
+ list. The lexer is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+ is found.
+
+.. function:: load_lexer_from_file(filename, lexername="CustomLexer", **options)
+
+ Return a `Lexer` subclass instance loaded from the provided file, relative
+ to the current directory. The file is expected to contain a Lexer class
+ named `lexername` (by default, CustomLexer). Users should be very careful with
+ the input, because this method is equivalent to running eval on the input file.
+ The lexer is given the `options` at its instantiation.
+
+ :exc:`ClassNotFound` is raised if there are any errors loading the Lexer
+
+ .. versionadded:: 2.2
+
+.. function:: guess_lexer(text, **options)
+
+ Return a `Lexer` subclass instance that's guessed from the text in
+ `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+ class is called with the text as argument, and the lexer which returned the
+ highest value will be instantiated and returned.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: guess_lexer_for_filename(filename, text, **options)
+
+ As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+ or `alias_filenames` that matches `filename` are taken into consideration.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: get_all_lexers()
+
+ Return an iterable over all registered lexers, yielding tuples in the
+ format::
+
+ (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
+
+ .. versionadded:: 0.6
+
+.. function:: find_lexer_class_by_name(alias)
+
+ Return the `Lexer` subclass that has `alias` in its aliases list, without
+ instantiating it.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
+
+ .. versionadded:: 2.2
+
+.. function:: find_lexer_class(name)
+
+ Return the `Lexer` subclass with the *name* attribute as given by
+ the *name* argument.
+
+
+.. module:: pygments.formatters
+
+Functions from :mod:`pygments.formatters`:
+
+.. function:: get_formatter_by_name(alias, **options)
+
+ Return an instance of a :class:`.Formatter` subclass that has `alias` in its
+ aliases list. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
+ alias is found.
+
+.. function:: get_formatter_for_filename(fn, **options)
+
+ Return a :class:`.Formatter` subclass instance that has a filename pattern
+ matching `fn`. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
+ is found.
+
+.. function:: load_formatter_from_file(filename, formattername="CustomFormatter", **options)
+
+ Return a `Formatter` subclass instance loaded from the provided file, relative
+ to the current directory. The file is expected to contain a Formatter class
+ named ``formattername`` (by default, CustomFormatter). Users should be very
+ careful with the input, because this method is equivalent to running eval
+ on the input file. The formatter is given the `options` at its instantiation.
+
+ :exc:`ClassNotFound` is raised if there are any errors loading the Formatter
+
+ .. versionadded:: 2.2
+
+.. module:: pygments.styles
+
+Functions from :mod:`pygments.styles`:
+
+.. function:: get_style_by_name(name)
+
+ Return a style class by its short name. The names of the builtin styles
+ are listed in :data:`pygments.styles.STYLE_MAP`.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
+ found.
+
+.. function:: get_all_styles()
+
+ Return an iterable over all registered styles, yielding their names.
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.lexer
+
+Lexers
+======
+
+The base lexer class from which all lexers are derived is:
+
+.. class:: Lexer(**options)
+
+ The constructor takes a \*\*keywords dictionary of options.
+ Every subclass must first process its own options and then call
+ the `Lexer` constructor, since it processes the `stripnl`,
+ `stripall` and `tabsize` options.
+
+ An example looks like this:
+
+ .. sourcecode:: python
+
+ def __init__(self, **options):
+ self.compress = options.get('compress', '')
+ Lexer.__init__(self, **options)
+
+ As these options must all be specifiable as strings (due to the
+ command line usage), there are various utility functions
+ available to help with that, see `Option processing`_.
+
+ .. method:: get_tokens(text)
+
+ This method is the basic interface of a lexer. It is called by
+ the `highlight()` function. It must process the text and return an
+ iterable of ``(tokentype, value)`` pairs from `text`.
+
+ Normally, you don't need to override this method. The default
+ implementation processes the `stripnl`, `stripall` and `tabsize`
+ options and then yields all tokens from `get_tokens_unprocessed()`,
+ with the ``index`` dropped.
+
+ .. method:: get_tokens_unprocessed(text)
+
+ This method should process the text and return an iterable of
+ ``(index, tokentype, value)`` tuples where ``index`` is the starting
+ position of the token within the input text.
+
+ This method must be overridden by subclasses.
+
+ .. staticmethod:: analyse_text(text)
+
+ A static method which is called for lexer guessing. It should analyse
+ the text and return a float in the range from ``0.0`` to ``1.0``.
+ If it returns ``0.0``, the lexer will not be selected as the most
+ probable one, if it returns ``1.0``, it will be selected immediately.
+
+ .. note:: You don't have to add ``@staticmethod`` to the definition of
+ this method, this will be taken care of by the Lexer's metaclass.
+
+ For a list of known tokens have a look at the :doc:`tokens` page.
+
+ A lexer also can have the following attributes (in fact, they are mandatory
+ except `alias_filenames`) that are used by the builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the lexer, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the lexer from a list, e.g. using `get_lexer_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of `fnmatch` patterns that match filenames which contain
+ content for this lexer. The patterns in this list should be unique among
+ all lexers.
+
+ .. attribute:: alias_filenames
+
+ A list of `fnmatch` patterns that match filenames which may or may not
+ contain content for this lexer. This list is used by the
+ :func:`.guess_lexer_for_filename()` function, to determine which lexers
+ are then included in guessing the correct one. That means that
+ e.g. every lexer for HTML and a template language should include
+ ``\*.html`` in this list.
+
+ .. attribute:: mimetypes
+
+ A list of MIME types for content that can be lexed with this
+ lexer.
+
+
+.. module:: pygments.formatter
+
+Formatters
+==========
+
+A formatter is derived from this class:
+
+
+.. class:: Formatter(**options)
+
+ As with lexers, this constructor processes options and then must call the
+ base class :meth:`__init__`.
+
+ The :class:`Formatter` class recognizes the options `style`, `full` and
+ `title`. It is up to the formatter class whether it uses them.
+
+ .. method:: get_style_defs(arg='')
+
+ This method must return statements or declarations suitable to define
+ the current style for subsequent highlighted text (e.g. CSS classes
+ in the `HTMLFormatter`).
+
+ The optional argument `arg` can be used to modify the generation and
+ is formatter dependent (it is standardized because it can be given on
+ the command line).
+
+ This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
+ the `arg` is then given by the ``-a`` option.
+
+ .. method:: format(tokensource, outfile)
+
+ This method must format the tokens from the `tokensource` iterable and
+ write the formatted version to the file object `outfile`.
+
+ Formatter options can control how exactly the tokens are converted.
+
+ .. versionadded:: 0.7
+ A formatter must have the following attributes that are used by the
+ builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the formatter, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of :mod:`fnmatch` patterns that match filenames for which this
+ formatter can produce output. The patterns in this list should be unique
+ among all formatters.
+
+
+.. module:: pygments.util
+
+Option processing
+=================
+
+The :mod:`pygments.util` module has some utility functions usable for processing
+command line options. All of the following functions get values from a
+dictionary of options. If the value is already in the type expected by the
+option, it is returned as-is. Otherwise, if the value is a string, it is first
+converted to the expected type if possible.
+
+.. exception:: OptionError
+
+ This exception will be raised by all option processing functions if
+ the type or value of the argument is not correct.
+
+.. function:: get_bool_opt(options, optname, default=None)
+
+ Intuitively, this is `options.get(optname, default)`, but restricted to
+ Boolean values. The Booleans can be represented as strings, in order to accept
+ Boolean values from the command line arguments. If the key `optname` is
+ present in the dictionary `options` and is not associated with a Boolean,
+ raise an `OptionError`. If it is absent, `default` is returned instead.
+
+ The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+ ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+ (matched case-insensitively).
+
+.. function:: get_int_opt(options, optname, default=None)
+
+ As :func:`get_bool_opt`, but interpret the value as an integer.
+
+.. function:: get_list_opt(options, optname, default=None)
+
+ If the key `optname` from the dictionary `options` is a string,
+ split it at whitespace and return it. If it is already a list
+ or a tuple, it is returned as a list.
+
+.. function:: get_choice_opt(options, optname, allowed, default=None)
+
+ If the key `optname` from the dictionary is not in the sequence
+ `allowed`, raise an error, otherwise return it.
+
+ .. versionadded:: 0.8
diff --git a/doc/docs/authors.rst b/doc/docs/authors.rst
new file mode 100644
index 0000000..f8373f0
--- /dev/null
+++ b/doc/docs/authors.rst
@@ -0,0 +1,4 @@
+Full contributor list
+=====================
+
+.. include:: ../../AUTHORS
diff --git a/doc/docs/changelog.rst b/doc/docs/changelog.rst
new file mode 100644
index 0000000..f264cab
--- /dev/null
+++ b/doc/docs/changelog.rst
@@ -0,0 +1 @@
+.. include:: ../../CHANGES
diff --git a/doc/docs/cmdline.rst b/doc/docs/cmdline.rst
new file mode 100644
index 0000000..b07b3e4
--- /dev/null
+++ b/doc/docs/cmdline.rst
@@ -0,0 +1,218 @@
+.. -*- mode: rst -*-
+
+======================
+Command Line Interface
+======================
+
+You can use Pygments from the shell, provided you installed the
+:program:`pygmentize` script::
+
+ $ pygmentize test.py
+ print "Hello World"
+
+will print the file test.py to standard output, using the Python lexer
+(inferred from the file name extension) and the terminal formatter (because
+you didn't give an explicit formatter name).
+:program:`pygmentize` attempts to
+detect the maximum number of colors that the terminal supports. The difference
+between color formatters for 16 and 256 colors is immense, but there is a less
+noticeable difference between color formatters for 256 and 16 million colors.
+
+Here's the process of how it detects the maximum number of colors
+supported by your terminal. If the ``COLORTERM`` environment variable is set to
+either ``truecolor`` or ``24bit``, it will use a 16 million color representation
+(like ``terminal16m``). Next, it checks whether ``256`` appears anywhere in the
+environment variable ``TERM``, in which case it uses a 256-color representation
+(such as ``terminal256``). When neither of those is found, it falls back to
+the 16 color representation (like ``terminal``).
+
+If you want HTML output::
+
+ $ pygmentize -f html -l python -o test.html test.py
+
+As you can see, the -l option explicitly selects a lexer. As seen above, if you
+give an input file name and it has an extension that Pygments recognizes, you can
+omit this option.
+
+The ``-o`` option gives an output file name. If it is not given, output is
+written to stdout.
+
+The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted
+if an output file name is given and has a supported extension).
+If no output file name is given and ``-f`` is omitted, the
+:class:`.TerminalFormatter` is used.
+
+The above command could therefore also be given as::
+
+ $ pygmentize -o test.html test.py
+
+To create a full HTML document, including line numbers and stylesheet (using the
+"emacs" style), highlighting the Python file ``test.py`` to ``test.html``::
+
+ $ pygmentize -O full,style=emacs,linenos=1 -o test.html test.py
+
+
+Options and filters
+-------------------
+
+Lexer and formatter options can be given using the ``-O`` option::
+
+ $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py
+
+Be sure to enclose the option string in quotes if it contains any special shell
+characters, such as spaces or expansion wildcards like ``*``. If an option
+expects a list value, separate the list entries with spaces (you'll have to
+quote the option value in this case too, so that the shell doesn't split it).
+
+Since the ``-O`` option argument is split at commas and expects the split values
+to be of the form ``name=value``, you can't give an option value that contains
+commas or equals signs. Therefore, an option ``-P`` is provided (as of Pygments
+0.9) that works like ``-O`` but can only pass one option per ``-P``. Its value
+can then contain all characters::
+
+ $ pygmentize -P "heading=Pygments, the Python highlighter" ...
+
+Filters are added to the token stream using the ``-F`` option::
+
+ $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas
+
+As you see, options for the filter are given after a colon. As for ``-O``, the
+filter name and options must be one shell word, so there may not be any spaces
+around the colon.
+
+
+Generating styles
+-----------------
+
+Formatters normally don't output full style information. For example, the HTML
+formatter by default only outputs ``<span>`` tags with ``class`` attributes.
+Therefore, there's a special ``-S`` option for generating style definitions.
+Usage is as follows::
+
+ $ pygmentize -f html -S colorful -a .syntax
+
+generates a CSS style sheet (because you selected the HTML formatter) for
+the "colorful" style prepending a ".syntax" selector to all style rules.
+
+For an explanation what ``-a`` means for :doc:`a particular formatter
+<formatters>`, look for the `arg` argument for the formatter's
+:meth:`.get_style_defs()` method.
+
+
+Getting lexer names
+-------------------
+
+.. versionadded:: 1.0
+
+The ``-N`` option guesses a lexer name for a given filename, so that ::
+
+ $ pygmentize -N setup.py
+
+will print out ``python``. It won't highlight anything yet. If no specific
+lexer is known for that filename, ``text`` is printed.
+
+Additionally, there is the ``-C`` option, which is just like ``-N``, except
+that it prints out a lexer name based solely on a given content from standard
+input.
+
+
+Guessing the lexer from the file contents
+-----------------------------------------
+
+The ``-g`` option will try to guess the correct lexer from the file contents,
+or pass through as plain text if nothing can be guessed. This option also looks
+for Vim modelines in the text, and for *some* languages, shebangs. Usage is as
+follows::
+
+ $ pygmentize -g setup.py
+
+Note, though, that this option is not very reliable, and probably should be
+used only if Pygments is not able to guess the correct lexer from the file's
+extension.
+
+
+Highlighting stdin until EOF
+----------------------------
+
+The ``-s`` option processes lines one at a time until EOF, rather than waiting
+to process the entire file. This only works for stdin, only for lexers with no
+line-spanning constructs, and is intended for streaming input such as you get
+from `tail -f`. Usage is as follows::
+
+ $ tail -f sql.log | pygmentize -s -l sql
+
+
+Custom Lexers and Formatters
+----------------------------
+
+.. versionadded:: 2.2
+
+The ``-x`` flag enables custom lexers and formatters to be loaded
+from files relative to the current directory. Create a file with a class named
+CustomLexer or CustomFormatter, then specify it on the command line::
+
+ $ pygmentize -l your_lexer.py -f your_formatter.py -x
+
+You can also specify the name of your class with a colon::
+
+ $ pygmentize -l your_lexer.py:SomeLexer -x
+
+For more information, see :doc:`the Pygments documentation on Lexer development
+<lexerdevelopment>`.
+
+
+Getting help
+------------
+
+The ``-L`` option lists lexers, formatters, along with their short
+names and supported file name extensions, styles and filters. If you want to see
+only one category, give it as an argument::
+
+ $ pygmentize -L filters
+
+will list only all installed filters.
+
+.. versionadded:: 2.11
+
+The ``--json`` option can be used in conjunction with the ``-L`` option to
+output its contents as JSON. Thus, to print all the installed styles and their
+description in JSON, use the command::
+
+ $ pygmentize -L styles --json
+
+The ``-H`` option will give you detailed information (the same that can be found
+in this documentation) about a lexer, formatter or filter. Usage is as follows::
+
+ $ pygmentize -H formatter html
+
+will print the help for the HTML formatter, while ::
+
+ $ pygmentize -H lexer python
+
+will print the help for the Python lexer, etc.
+
+
+A note on encodings
+-------------------
+
+.. versionadded:: 0.9
+
+Pygments tries to be smart regarding encodings in the formatting process:
+
+* If you give an ``encoding`` option, it will be used as the input and
+ output encoding.
+
+* If you give an ``outencoding`` option, it will override ``encoding``
+ as the output encoding.
+
+* If you give an ``inencoding`` option, it will override ``encoding``
+ as the input encoding.
+
+* If you don't give an encoding and have given an output file, the default
+ encoding for lexer and formatter is the terminal encoding or the default
+ locale encoding of the system. As a last resort, ``latin1`` is used (which
+ will pass through all non-ASCII characters).
+
+* If you don't give an encoding and haven't given an output file (that means
+ output is written to the console), the default encoding for lexer and
+ formatter is the terminal encoding (``sys.stdout.encoding``).
diff --git a/doc/docs/filterdevelopment.rst b/doc/docs/filterdevelopment.rst
new file mode 100644
index 0000000..004919e
--- /dev/null
+++ b/doc/docs/filterdevelopment.rst
@@ -0,0 +1,75 @@
+.. -*- mode: rst -*-
+
+=====================
+Write your own filter
+=====================
+
+.. versionadded:: 0.7
+
+Writing your own filters is very easy. All you have to do is to subclass
+the `Filter` class and override the `filter` method. Additionally a
+filter is instantiated with some keyword arguments you can use to
+adjust the behavior of your filter.
+
+
+Subclassing Filters
+===================
+
+As an example, we write a filter that converts all `Name.Function` tokens
+to normal `Name` tokens to make the output less colorful.
+
+.. sourcecode:: python
+
+ from pygments.util import get_bool_opt
+ from pygments.token import Name
+ from pygments.filter import Filter
+
+ class UncolorFilter(Filter):
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ self.class_too = get_bool_opt(options, 'classtoo')
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if ttype is Name.Function or (self.class_too and
+ ttype is Name.Class):
+ ttype = Name
+ yield ttype, value
+
+Some notes on the `lexer` argument: that can be quite confusing since it doesn't
+need to be a lexer instance. If a filter was added by using the `add_filter()`
+function of lexers, that lexer is registered for the filter. In that case
+`lexer` will refer to the lexer that has registered the filter. It *can* be used
+to access options passed to a lexer. Because it could be `None` you always have
+to check for that case if you access it.
+
+
+Using a decorator
+=================
+
+You can also use the `simplefilter` decorator from the `pygments.filter` module:
+
+.. sourcecode:: python
+
+ from pygments.util import get_bool_opt
+ from pygments.token import Name
+ from pygments.filter import simplefilter
+
+
+ @simplefilter
+ def uncolor(self, lexer, stream, options):
+ class_too = get_bool_opt(options, 'classtoo')
+ for ttype, value in stream:
+ if ttype is Name.Function or (class_too and
+ ttype is Name.Class):
+ ttype = Name
+ yield ttype, value
+
+
+You can instantiate this filter by calling `uncolor(classtoo=True)`, the same
+way that you would have instantiated the previous filter by calling
+`UncolorFilter(classtoo=True)`. Indeed, the decorator automatically ensures that
+`uncolor` is a class which subclasses an internal filter class. The class
+`uncolor` uses the decorated function as a method for filtering. (That's why
+there is a `self` argument that you probably won't end up using in the method.)
diff --git a/doc/docs/filters.rst b/doc/docs/filters.rst
new file mode 100644
index 0000000..5cdcb4c
--- /dev/null
+++ b/doc/docs/filters.rst
@@ -0,0 +1,48 @@
+.. -*- mode: rst -*-
+
+=======
+Filters
+=======
+
+.. versionadded:: 0.7
+
+Transforming a stream of tokens into another stream is called "filtering" and is
+done by filters. The most common filters transform each token by
+applying simple rules, such as highlighting the token if it is a TODO or
+another special word, or converting keywords to uppercase to enforce a style
+guide. More complex filters can transform the stream of tokens, such as removing
+the line indentation or merging tokens together. It should be noted that Pygments
+filters are entirely unrelated to Python's `filter
+<https://docs.python.org/3/library/functions.html#filter>`_.
+
+An arbitrary number of filters can be applied to token streams coming from
+lexers to improve or annotate the output. To apply a filter, you can use the
+`add_filter()` method of a lexer:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.lexers import PythonLexer
+ >>> l = PythonLexer()
+ >>> # add a filter given by a string and options
+ >>> l.add_filter('codetagify', case='lower')
+ >>> l.filters
+ [<pygments.filters.CodeTagFilter object at 0xb785decc>]
+ >>> from pygments.filters import KeywordCaseFilter
+ >>> # or give an instance
+ >>> l.add_filter(KeywordCaseFilter(case='lower'))
+
+The `add_filter()` method takes keyword arguments which are forwarded to
+the constructor of the filter.
+
+To get a list of all registered filters by name, you can use the
+`get_all_filters()` function from the `pygments.filters` module that returns an
+iterable for all known filters.
+
+If you want to write your own filter, have a look at :doc:`Write your own filter
+<filterdevelopment>`.
+
+
+Builtin Filters
+===============
+
+.. pygmentsdoc:: filters
diff --git a/doc/docs/formatterdevelopment.rst b/doc/docs/formatterdevelopment.rst
new file mode 100644
index 0000000..2bfac05
--- /dev/null
+++ b/doc/docs/formatterdevelopment.rst
@@ -0,0 +1,169 @@
+.. -*- mode: rst -*-
+
+========================
+Write your own formatter
+========================
+
+As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new
+formatter for Pygments is easy and straightforward.
+
+A formatter is a class that is initialized with some keyword arguments (the
+formatter options) and that must provide a `format()` method.
+Additionally a formatter should provide a `get_style_defs()` method that
+returns the style definitions from the style in a form usable for the
+formatter's output format.
+
+
+Quickstart
+==========
+
+The most basic formatter shipped with Pygments is the `NullFormatter`. It just
+sends the value of a token to the output stream:
+
+.. sourcecode:: python
+
+ from pygments.formatter import Formatter
+
+ class NullFormatter(Formatter):
+ def format(self, tokensource, outfile):
+ for ttype, value in tokensource:
+ outfile.write(value)
+
+As you can see, the `format()` method is passed two parameters: `tokensource`
+and `outfile`. The first is an iterable of ``(token_type, value)`` tuples,
+the latter a file-like object with a `write()` method.
+
+Because the formatter is that basic it doesn't overwrite the `get_style_defs()`
+method.
+
+
+Styles
+======
+
+Styles aren't instantiated but their metaclass provides some class functions
+so that you can access the style definitions easily.
+
+Styles are iterable and yield tuples in the form ``(ttype, d)`` where `ttype`
+is a token and `d` is a dict with the following keys:
+
+``'color'``
+ Hexadecimal color value (eg: ``'ff0000'`` for red) or `None` if not
+ defined.
+
+``'bold'``
+ `True` if the value should be bold
+
+``'italic'``
+ `True` if the value should be italic
+
+``'underline'``
+ `True` if the value should be underlined
+
+``'bgcolor'``
+ Hexadecimal color value for the background (eg: ``'eeeeee'`` for light
+ gray) or `None` if not defined.
+
+``'border'``
+ Hexadecimal color value for the border (eg: ``'0000aa'`` for a dark
+ blue) or `None` for no border.
+
+Additional keys might appear in the future, formatters should ignore all keys
+they don't support.
+
+
+HTML 3.2 Formatter
+==================
+
+For a more complex example, let's implement an HTML 3.2 Formatter. We don't
+use CSS but inline markup (``<u>``, ``<font>``, etc). Because this isn't good
+style this formatter isn't in the standard library ;-)
+
+.. sourcecode:: python
+
+ from pygments.formatter import Formatter
+
+ class OldHtmlFormatter(Formatter):
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+
+ # create a dict of (start, end) tuples that wrap the
+ # value of a token so that we can use it in the format
+ # method later
+ self.styles = {}
+
+ # we iterate over the `_styles` attribute of a style item
+ # that contains the parsed style values.
+ for token, style in self.style:
+ start = end = ''
+ # a style item is a tuple in the following form:
+ # colors are readily specified in hex: 'RRGGBB'
+ if style['color']:
+ start += '<font color="#%s">' % style['color']
+ end = '</font>' + end
+ if style['bold']:
+ start += '<b>'
+ end = '</b>' + end
+ if style['italic']:
+ start += '<i>'
+ end = '</i>' + end
+ if style['underline']:
+ start += '<u>'
+ end = '</u>' + end
+ self.styles[token] = (start, end)
+
+ def format(self, tokensource, outfile):
+ # lastval is a string we use for caching
+ # because it's possible that a lexer yields a number
+ # of consecutive tokens with the same token type.
+ # to minimize the size of the generated html markup we
+ # try to join the values of same-type tokens here
+ lastval = ''
+ lasttype = None
+
+ # wrap the whole output with <pre>
+ outfile.write('<pre>')
+
+ for ttype, value in tokensource:
+ # if the token type doesn't exist in the stylemap
+ # we try it with the parent of the token type
+ # eg: parent of Token.Literal.String.Double is
+ # Token.Literal.String
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ if ttype == lasttype:
+ # the current token type is the same of the last
+ # iteration. cache it
+ lastval += value
+ else:
+ # not the same token as last iteration, but we
+ # have some data in the buffer. wrap it with the
+ # defined style and write it to the output file
+ if lastval:
+ stylebegin, styleend = self.styles[lasttype]
+ outfile.write(stylebegin + lastval + styleend)
+ # set lastval/lasttype to current values
+ lastval = value
+ lasttype = ttype
+
+ # if something is left in the buffer, write it to the
+ # output file, then close the opened <pre> tag
+ if lastval:
+ stylebegin, styleend = self.styles[lasttype]
+ outfile.write(stylebegin + lastval + styleend)
+ outfile.write('</pre>\n')
+
+The comments should explain it. Again, this formatter doesn't override the
+`get_style_defs()` method. If we had used CSS classes instead of
+inline HTML markup, we would need to generate the CSS first. For that
+purpose the `get_style_defs()` method exists:
+
+
+Generating Style Definitions
+============================
+
+Some formatters like the `LatexFormatter` and the `HtmlFormatter` don't
+output inline markup but reference either macros or css classes. Because
+the definitions of those are not part of the output, the `get_style_defs()`
+method exists. It is passed one parameter (if it's used and how it's used
+is up to the formatter) and has to return a string or ``None``.
diff --git a/doc/docs/formatters.rst b/doc/docs/formatters.rst
new file mode 100644
index 0000000..9e7074e
--- /dev/null
+++ b/doc/docs/formatters.rst
@@ -0,0 +1,48 @@
+.. -*- mode: rst -*-
+
+====================
+Available formatters
+====================
+
+This page lists all builtin formatters.
+
+Common options
+==============
+
+All formatters support these options:
+
+`encoding`
+ If given, must be an encoding name (such as ``"utf-8"``). This will
+ be used to convert the token strings (which are Unicode strings)
+ to byte strings in the output (default: ``None``).
+ It will also be written in an encoding declaration suitable for the
+ document format if the `full` option is given (e.g. a ``meta
+ content-type`` directive in HTML or an invocation of the `inputenc`
+ package in LaTeX).
+
+ If this is ``""`` or ``None``, Unicode strings will be written
+ to the output file, which most file-like objects do not support.
+ For example, `pygments.highlight()` will return a Unicode string if
+ called with no `outfile` argument and a formatter that has `encoding`
+ set to ``None`` because it uses a `StringIO.StringIO` object that
+ supports Unicode arguments to `write()`. Using a regular file object
+ wouldn't work.
+
+ .. versionadded:: 0.6
+
+`outencoding`
+ When using Pygments from the command line, any `encoding` option given is
+ passed to the lexer and the formatter. This is sometimes not desirable,
+ for example if you want to set the input encoding to ``"guess"``.
+ Therefore, `outencoding` has been introduced which overrides `encoding`
+ for the formatter if given.
+
+ .. versionadded:: 0.7
+
+
+Formatter classes
+=================
+
+All these classes are importable from :mod:`pygments.formatters`.
+
+.. pygmentsdoc:: formatters
diff --git a/doc/docs/index.rst b/doc/docs/index.rst
new file mode 100644
index 0000000..d35fe6f
--- /dev/null
+++ b/doc/docs/index.rst
@@ -0,0 +1,64 @@
+Pygments documentation
+======================
+
+**Starting with Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ ../download
+ quickstart
+ cmdline
+
+**Builtin components**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexers
+ filters
+ formatters
+ styles
+
+**Reference**
+
+.. toctree::
+ :maxdepth: 1
+
+ unicode
+ tokens
+ api
+ terminal-sessions
+
+**Hacking for Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexerdevelopment
+ formatterdevelopment
+ filterdevelopment
+ styledevelopment
+ plugins
+
+**Hints and tricks**
+
+.. toctree::
+ :maxdepth: 1
+
+ rstdirective
+ moinmoin
+ java
+ integrate
+
+**About Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ changelog
+ authors
+ security
+
+If you find bugs or have suggestions for the documentation, please submit them
+on `GitHub <https://github.com/pygments/pygments>`_.
diff --git a/doc/docs/integrate.rst b/doc/docs/integrate.rst
new file mode 100644
index 0000000..2a030b7
--- /dev/null
+++ b/doc/docs/integrate.rst
@@ -0,0 +1,40 @@
+.. -*- mode: rst -*-
+
+===================================
+Using Pygments in various scenarios
+===================================
+
+Markdown
+--------
+
+Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code
+that uses Pygments to render source code in
+:file:`external/markdown-processor.py`. You can copy and adapt it to your
+liking.
+
+.. _Markdown: https://pypi.org/project/Markdown/
+
+TextMate
+--------
+
+Antonio Cangiano has created a Pygments bundle for TextMate that allows you to
+colorize code via a simple menu option. It can be found here_.
+
+.. _here: https://programmingzen.com/pygments-textmate-bundle/
+
+Bash completion
+---------------
+
+The source distribution contains a file ``external/pygments.bashcomp`` that
+sets up completion for the ``pygmentize`` command in bash.
+
+Wrappers for other languages
+----------------------------
+
+These libraries provide Pygments highlighting for users of other languages
+than Python:
+
+* `pygments.rb <https://github.com/pygments/pygments.rb>`_, a pygments wrapper for Ruby
+* `Clygments <https://github.com/bfontaine/clygments>`_, a pygments wrapper for
+ Clojure
+* `PHPygments <https://github.com/capynet/PHPygments>`_, a pygments wrapper for PHP
diff --git a/doc/docs/java.rst b/doc/docs/java.rst
new file mode 100644
index 0000000..a8a5beb
--- /dev/null
+++ b/doc/docs/java.rst
@@ -0,0 +1,70 @@
+=====================
+Use Pygments in Java
+=====================
+
+Thanks to `Jython <https://www.jython.org/>`_ it is possible to use Pygments in
+Java.
+
+This page is a simple tutorial to get an idea of how this works. You can
+then look at the `Jython documentation <https://jython.readthedocs.io/en/latest/>`_ for more
+advanced uses.
+
+Since version 1.5, Pygments is deployed on `Maven Central
+<https://repo1.maven.org/maven2/org/pygments/pygments/>`_ as a JAR, as is Jython
+which makes it a lot easier to create a Java project.
+
+Here is an example of a `Maven <https://maven.apache.org/>`_ ``pom.xml`` file for a
+project running Pygments:
+
+.. sourcecode:: xml
+
+ <?xml version="1.0" encoding="UTF-8"?>
+
+ <project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>example</groupId>
+ <artifactId>example</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <dependencies>
+ <dependency>
+ <groupId>org.python</groupId>
+ <artifactId>jython-standalone</artifactId>
+ <version>2.5.3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.pygments</groupId>
+ <artifactId>pygments</artifactId>
+ <version>1.5</version>
+ <scope>runtime</scope>
+ </dependency>
+ </dependencies>
+ </project>
+
+The following Java example:
+
+.. sourcecode:: java
+
+ PythonInterpreter interpreter = new PythonInterpreter();
+
+ // Set a variable with the content you want to work with
+ interpreter.set("code", code);
+
+ // Simple use Pygments as you would in Python
+ interpreter.exec("from pygments import highlight\n"
+ + "from pygments.lexers import PythonLexer\n"
+ + "from pygments.formatters import HtmlFormatter\n"
+ + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())");
+
+ // Get the result that has been set in a variable
+ System.out.println(interpreter.get("result", String.class));
+
+will print something like:
+
+.. sourcecode:: html
+
+ <div class="highlight">
+ <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
+ </div>
diff --git a/doc/docs/lexerdevelopment.rst b/doc/docs/lexerdevelopment.rst
new file mode 100644
index 0000000..354b1d4
--- /dev/null
+++ b/doc/docs/lexerdevelopment.rst
@@ -0,0 +1,748 @@
+.. -*- mode: rst -*-
+
+.. highlight:: python
+
+====================
+Write your own lexer
+====================
+
+If a lexer for your favorite language is missing in the Pygments package, you
+can easily write your own and extend Pygments.
+
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation <api>`, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to lex.
+
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
+containing tuples in the form ``(index, token, value)``. Normally you don't
+need to do this since there are base lexers that do most of the work and that
+you can subclass.
+
+RegexLexer
+==========
+
+The lexer base class used by almost all of Pygments' lexers is the
+:class:`RegexLexer`. This class allows you to define lexing rules in terms of
+*regular expressions* for different *states*.
+
+States are groups of regular expressions that are matched against the input
+string at the *current position*. If one of these expressions matches, a
+corresponding action is performed (such as yielding a token with a specific
+type, or changing state), the current position is set to where the last match
+ended and the matching process continues with the first regex of the current
+state.
+
+Lexer states are kept on a stack: each time a new state is entered, the new
+state is pushed onto the stack. The most basic lexers (like the `DiffLexer`)
+just need one state.
+
+Each state is defined as a list of tuples in the form (`regex`, `action`,
+`new_state`) where the last item is optional. In the most basic form, `action`
+is a token type (like `Name.Builtin`). That means: when `regex` matches, emit a
+token with the match text and that token type, and push `new_state` on the state
+stack. If the new state is ``'#pop'``, the topmost state is popped from the
+stack instead. To pop more than one state, use ``'#pop:2'`` and so on.
+``'#push'`` is a synonym for pushing the current state on top of the
+stack a second time.
+
+The following example shows the `DiffLexer` from the builtin lexers. Note that
+it contains some additional attributes `name`, `aliases` and `filenames` which
+aren't required for a lexer. They are used by the builtin lexer lookup
+functions. ::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class DiffLexer(RegexLexer):
+ name = 'Diff'
+ aliases = ['diff']
+ filenames = ['*.diff']
+
+ tokens = {
+ 'root': [
+ (r' .*\n', Text),
+ (r'\+.*\n', Generic.Inserted),
+ (r'-.*\n', Generic.Deleted),
+ (r'@.*\n', Generic.Subheading),
+ (r'Index.*\n', Generic.Heading),
+ (r'=.*\n', Generic.Heading),
+ (r'.*\n', Text),
+ ]
+ }
+
+As you can see this lexer only uses one state. When the lexer starts scanning
+the text, it first checks if the current character is a space. If this is true
+it scans everything until newline and returns the data as a `Text` token (which
+is the "no special highlighting" token).
+
+If this rule doesn't match, it checks if the current char is a plus sign. And
+so on.
+
+If no rule matches at the current position, the current char is emitted as an
+`Error` token that indicates a lexing error, and the position is increased by
+one.
+
+
+Adding and testing a new lexer
+==============================
+
+The easiest way to use a new lexer is to use Pygments' support for loading
+the lexer from a file relative to your current directory.
+
+First, change the name of your lexer class to CustomLexer:
+
+.. code-block:: python
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class CustomLexer(RegexLexer):
+ """All your lexer code goes here!"""
+
+Then you can load and test the lexer from the command line with the additional
+flag ``-x``:
+
+.. code-block:: console
+
+ $ python -m pygments -x -l your_lexer_file.py <inputfile>
+
+To specify a class name other than CustomLexer, append it with a colon:
+
+.. code-block:: console
+
+ $ python -m pygments -x -l your_lexer.py:SomeLexer <inputfile>
+
+Or, using the Python API:
+
+.. code-block:: python
+
+ # For a lexer named CustomLexer
+ your_lexer = load_lexer_from_file(filename, **options)
+
+ # For a lexer named MyNewLexer
+ your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options)
+
+When loading custom lexers and formatters, be extremely careful to use only
+trusted files; Pygments will perform the equivalent of ``eval`` on them.
+
+If you only want to use your lexer with the Pygments API, you can import and
+instantiate the lexer yourself, then pass it to :func:`pygments.highlight`.
+
+Use the ``-f`` flag to select a different output format than terminal
+escape sequences. The :class:`pygments.formatters.html.HtmlFormatter` helps
+you with debugging your lexer. You can use the ``debug_token_types`` option
+to display the token types assigned to each part of your input file:
+
+.. code-block:: console
+
+ $ python -m pygments -x -f html -Ofull,debug_token_types -l your_lexer.py:SomeLexer <inputfile>
+
+Hover over each token to see the token type displayed as a tooltip.
+
+To prepare your new lexer for inclusion in the Pygments distribution, so that it
+will be found when passing filenames or lexer aliases from the command line, you
+have to perform the following steps.
+
+First, change to the current directory containing the Pygments source code. You
+will need to have either an unpacked source tarball, or (preferably) a copy
+cloned from GitHub.
+
+.. code-block:: console
+
+ $ cd pygments
+
+Select a matching module under ``pygments/lexers``, or create a new module for
+your lexer class.
+
+.. note::
+
+ We encourage you to put your lexer class into its own module, unless it's a
+ very small derivative of an already existing lexer.
+
+Next, make sure the lexer is known from outside of the module. All modules in
+the ``pygments.lexers`` package specify ``__all__``. For example,
+``esoteric.py`` sets::
+
+ __all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+Add the name of your lexer class to this list (or create the list if your lexer
+is the only class in the module).
+
+Finally the lexer can be made publicly known by rebuilding the lexer mapping.
+In the root directory of the source (where the ``Makefile`` is located), run:
+
+.. code-block:: console
+
+ $ make mapfiles
+
+To test the new lexer, store an example file in
+``tests/examplefiles/<alias>``. For example, to test your
+``DiffLexer``, add a ``tests/examplefiles/diff/example.diff`` containing a
+sample diff output. To (re)generate the lexer output which the file is checked
+against, use the command ``pytest tests/examplefiles/diff --update-goldens``.
+
+Now you can use ``python -m pygments`` from the current root of the checkout to
+render your example to HTML:
+
+.. code-block:: console
+
+ $ python -m pygments -O full -f html -o /tmp/example.html tests/examplefiles/diff/example.diff
+
+Note that this explicitly calls the ``pygments`` module in the current
+directory. This ensures your modifications are used. Otherwise a possibly
+already installed, unmodified version without your new lexer would have been
+called from the system search path (``$PATH``).
+
+To view the result, open ``/tmp/example.html`` in your browser.
+
+Once the example renders as expected, you should run the complete test suite:
+
+.. code-block:: console
+
+ $ make test
+
+It also tests that your lexer fulfills the lexer API and certain invariants,
+such as that the concatenation of all token text is the same as the input text.
+
+
+Regex Flags
+===========
+
+You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or
+globally by adding a `flags` attribute to your lexer class. If no attribute is
+defined, it defaults to `re.MULTILINE`. For more information about regular
+expression flags see the page about `regular expressions`_ in the Python
+documentation.
+
+.. _regular expressions: https://docs.python.org/library/re.html#regular-expression-syntax
+
+
+Scanning multiple tokens at once
+================================
+
+So far, the `action` element in the rule tuple of regex, action and state has
+been a single token type. Now we look at the first of several other possible
+values.
+
+Here is a more complex lexer that highlights INI files. INI files consist of
+sections, comments and ``key = value`` pairs::
+
+ from pygments.lexer import RegexLexer, bygroups
+ from pygments.token import *
+
+ class IniLexer(RegexLexer):
+ name = 'INI'
+ aliases = ['ini', 'cfg']
+ filenames = ['*.ini', '*.cfg']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r';.*?$', Comment),
+ (r'\[.*?\]$', Keyword),
+ (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+ bygroups(Name.Attribute, Text, Operator, Text, String))
+ ]
+ }
+
+The lexer first looks for whitespace, comments and section names. Later it
+looks for a line that looks like a key, value pair, separated by an ``'='``
+sign, and optional whitespace.
+
+The `bygroups` helper yields each capturing group in the regex with a different
+token type. First the `Name.Attribute` token, then a `Text` token for the
+optional whitespace, after that an `Operator` token for the equals sign. Then a
+`Text` token for the whitespace again. The rest of the line is returned as
+`String`.
+
+Note that for this to work, every part of the match must be inside a capturing
+group (a ``(...)``), and there must not be any nested capturing groups. If you
+nevertheless need a group, use a non-capturing group defined using this syntax:
+``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis).
+
+If you find yourself needing a capturing group inside the regex which shouldn't
+be part of the output but is used in the regular expressions for backreferencing
+(eg: ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None` to the bygroups
+function and that group will be skipped in the output.
+
+
+Changing states
+===============
+
+Many lexers need multiple states to work as expected. For example, some
+languages allow multiline comments to be nested. Since this is a recursive
+pattern it's impossible to lex just using regular expressions.
+
+Here is a lexer that recognizes C++ style comments (multi-line with ``/* */``
+and single-line with ``//`` until end of line)::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class CppCommentLexer(RegexLexer):
+ name = 'Example Lexer with states'
+
+ tokens = {
+ 'root': [
+ (r'[^/]+', Text),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'//.*?$', Comment.Singleline),
+ (r'/', Text)
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ]
+ }
+
+This lexer starts lexing in the ``'root'`` state. It tries to match as much as
+possible until it finds a slash (``'/'``). If the next character after the slash
+is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the
+output stream marked as `Comment.Multiline` and continues lexing with the rules
+defined in the ``'comment'`` state.
+
+If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a
+Singleline comment (i.e. followed by a second slash). If this also wasn't the
+case it must be a single slash, which is not a comment starter (the separate
+regex for a single slash must also be given, else the slash would be marked as
+an error token).
+
+Inside the ``'comment'`` state, we do the same thing again. Scan until the
+lexer finds a star or slash. If it's the opening of a multiline comment, push
+the ``'comment'`` state on the stack and continue scanning, again in the
+``'comment'`` state. Else, check if it's the end of the multiline comment. If
+yes, pop one state from the stack.
+
+Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
+easy way to prevent this from happening: don't ``'#pop'`` in the root state).
+
+If the `RegexLexer` encounters a newline that is flagged as an error token, the
+stack is emptied and the lexer continues scanning in the ``'root'`` state. This
+can help producing error-tolerant highlighting for erroneous input, e.g. when a
+single-line string is not closed.
+
+
+Advanced state tricks
+=====================
+
+There are a few more things you can do with states:
+
+- You can push multiple states onto the stack if you give a tuple instead of a
+ simple string as the third item in a rule tuple. For example, if you want to
+ match a comment containing a directive, something like:
+
+ .. code-block:: text
+
+ /* <processing directive> rest of comment */
+
+ you can use this rule::
+
+ tokens = {
+ 'root': [
+ (r'/\* <', Comment, ('comment', 'directive')),
+ ...
+ ],
+ 'directive': [
+ (r'[^>]+', Comment.Directive),
+ (r'>', Comment, '#pop'),
+ ],
+ 'comment': [
+ (r'[^*]+', Comment),
+ (r'\*/', Comment, '#pop'),
+ (r'\*', Comment),
+ ]
+ }
+
+ When this encounters the above sample, first ``'comment'`` and ``'directive'``
+ are pushed onto the stack, then the lexer continues in the directive state
+ until it finds the closing ``>``, then it continues in the comment state until
+ the closing ``*/``. Then, both states are popped from the stack again and
+ lexing continues in the root state.
+
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
+
+
+- You can include the rules of a state in the definition of another. This is
+ done by using `include` from `pygments.lexer`::
+
+ from pygments.lexer import RegexLexer, bygroups, include
+ from pygments.token import *
+
+ class ExampleLexer(RegexLexer):
+ tokens = {
+ 'comments': [
+ (r'(?s)/\*.*?\*/', Comment),
+ (r'//.*?\n', Comment),
+ ],
+ 'root': [
+ include('comments'),
+ (r'(function)( )(\w+)( )({)',
+ bygroups(Keyword, Whitespace, Name, Whitespace, Punctuation), 'function'),
+ (r'.*\n', Text),
+ ],
+ 'function': [
+ (r'[^}/]+', Text),
+ include('comments'),
+ (r'/', Text),
+ (r'\}', Punctuation, '#pop'),
+ ]
+ }
+
+ This is a hypothetical lexer for a language that consists of functions and
+ comments. Because comments can occur at toplevel and in functions, we need
+ rules for comments in both states. As you can see, the `include` helper saves
+ repeating rules that occur more than once (in this example, the state
+ ``'comment'`` will never be entered by the lexer, as it's only there to be
+ included in ``'root'`` and ``'function'``).
+
+- Sometimes, you may want to "combine" a state from existing ones. This is
+ possible with the `combined` helper from `pygments.lexer`.
+
+ If you, instead of a new state, write ``combined('state1', 'state2')`` as the
+ third item of a rule tuple, a new anonymous state will be formed from state1
+ and state2 and if the rule matches, the lexer will enter this state.
+
+ This is not used very often, but can be helpful in some cases, such as the
+ `PythonLexer`'s string literal processing.
+
+- If you want your lexer to start lexing in a different state you can modify the
+ stack by overriding the `get_tokens_unprocessed()` method::
+
+ from pygments.lexer import RegexLexer
+
+ class ExampleLexer(RegexLexer):
+ tokens = {...}
+
+ def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
+ for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
+ yield item
+
+ Some lexers like the `PhpLexer` use this to make the leading ``<?php``
+ preprocessor comments optional. Note that you can crash the lexer easily by
+ putting values into the stack that don't exist in the token map. Also
+ removing ``'root'`` from the stack can result in strange errors!
+
+- In some lexers, a state should be popped if anything is encountered that isn't
+ matched by a rule in the state. You could use an empty regex at the end of
+ the state list, but Pygments provides a more obvious way of spelling that:
+ ``default('#pop')`` is equivalent to ``('', Text, '#pop')``.
+
+ .. versionadded:: 2.0
+
+
+Subclassing lexers derived from RegexLexer
+==========================================
+
+.. versionadded:: 1.6
+
+Sometimes multiple languages are very similar, but should still be lexed by
+different lexer classes.
+
+When subclassing a lexer derived from RegexLexer, the ``tokens`` dictionaries
+defined in the parent and child class are merged. For example::
+
+ from pygments.lexer import RegexLexer, inherit
+ from pygments.token import *
+
+ class BaseLexer(RegexLexer):
+ tokens = {
+ 'root': [
+ ('[a-z]+', Name),
+ (r'/\*', Comment, 'comment'),
+ ('"', String, 'string'),
+ (r'\s+', Text),
+ ],
+ 'string': [
+ ('[^"]+', String),
+ ('"', String, '#pop'),
+ ],
+ 'comment': [
+ ...
+ ],
+ }
+
+ class DerivedLexer(BaseLexer):
+ tokens = {
+ 'root': [
+ ('[0-9]+', Number),
+ inherit,
+ ],
+ 'string': [
+ (r'[^"\\]+', String),
+ (r'\\.', String.Escape),
+ ('"', String, '#pop'),
+ ],
+ }
+
+The `BaseLexer` defines two states, lexing names and strings. The
+`DerivedLexer` defines its own tokens dictionary, which extends the definitions
+of the base lexer:
+
+* The "root" state has an additional rule and then the special object `inherit`,
+ which tells Pygments to insert the token definitions of the parent class at
+ that point.
+
+* The "string" state is replaced entirely, since there is no `inherit` rule.
+
+* The "comment" state is inherited entirely.
+
+
+Using multiple lexers
+=====================
+
+Using multiple lexers for the same input can be tricky. One of the easiest
+combination techniques is shown here: You can replace the action entry in a rule
+tuple with a lexer class. The matched text will then be lexed with that lexer,
+and the resulting tokens will be yielded.
+
+For example, look at this stripped-down HTML lexer::
+
+ from pygments.lexer import RegexLexer, bygroups, using
+ from pygments.token import *
+ from pygments.lexers.javascript import JavascriptLexer
+
+ class HtmlLexer(RegexLexer):
+ name = 'HTML'
+ aliases = ['html']
+ filenames = ['*.html', '*.htm']
+
+ flags = re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ ('[^<&]+', Text),
+ ('&.*?;', Name.Entity),
+ (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+ (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+ ],
+ 'script-content': [
+ (r'(.+?)(<\s*/\s*script\s*>)',
+ bygroups(using(JavascriptLexer), Name.Tag),
+ '#pop'),
+ ]
+ }
+
+Here the content of a ``<script>`` tag is passed to a newly created instance of
+a `JavascriptLexer` and not processed by the `HtmlLexer`. This is done using
+the `using` helper that takes the other lexer class as its parameter.
+
+Note the combination of `bygroups` and `using`. This makes sure that the
+content up to the ``</script>`` end tag is processed by the `JavascriptLexer`,
+while the end tag is yielded as a normal token with the `Name.Tag` type.
+
+Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
+Here, two states are pushed onto the state stack, ``'script-content'`` and
+``'tag'``. That means that first ``'tag'`` is processed, which will lex
+attributes and the closing ``>``, then the ``'tag'`` state is popped and the
+next state on top of the stack will be ``'script-content'``.
+
+Since you cannot refer to the class currently being defined, use `this`
+(imported from `pygments.lexer`) to refer to the current lexer class, i.e.
+``using(this)``. This construct may seem unnecessary, but this is often the
+most obvious way of lexing arbitrary syntax between fixed delimiters without
+introducing deeply nested states.
+
+The `using()` helper has a special keyword argument, `state`, which works as
+follows: if given, the lexer to use initially is not in the ``"root"`` state,
+but in the state given by this argument. This does not work with advanced
+`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below).
+
+Any other keyword arguments passed to `using()` are added to the keyword
+arguments used to create the lexer.
+
+
+Delegating Lexer
+================
+
+Another approach for nested lexers is the `DelegatingLexer` which is for example
+used for the template engine lexers. It takes two lexers as arguments on
+initialisation: a `root_lexer` and a `language_lexer`.
+
+The input is processed as follows: First, the whole text is lexed with the
+`language_lexer`. All tokens yielded with the special type of ``Other`` are
+then concatenated and given to the `root_lexer`. The language tokens of the
+`language_lexer` are then inserted into the `root_lexer`'s token stream at the
+appropriate positions. ::
+
+ from pygments.lexer import DelegatingLexer
+ from pygments.lexers.web import HtmlLexer, PhpLexer
+
+ class HtmlPhpLexer(DelegatingLexer):
+ def __init__(self, **options):
+ super().__init__(HtmlLexer, PhpLexer, **options)
+
+This procedure ensures that e.g. HTML with template tags in it is highlighted
+correctly even if the template tags are put into HTML tags or attributes.
+
+If you want to change the needle token ``Other`` to something else, you can give
+the lexer another token type as the third parameter::
+
+ DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
+
+
+Callbacks
+=========
+
+Sometimes the grammar of a language is so complex that a lexer would be unable
+to process it just by using regular expressions and stacks.
+
+For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
+of token types (`bygroups` and `using` are nothing else but preimplemented
+callbacks). The callback must be a function taking two arguments:
+
+* the lexer itself
+* the match object for the last matched rule
+
+The callback must then return an iterable of (or simply yield) ``(index,
+tokentype, value)`` tuples, which are then just passed through by
+`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
+the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
+and ``value`` the associated part of the input string.
+
+You can see an example here::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import Generic
+
+ class HypotheticLexer(RegexLexer):
+
+ def headline_callback(lexer, match):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+If the regex for the `headline_callback` matches, the function is called with
+the match object. Note that after the callback is done, processing continues
+normally, that is, after the end of the previous match. The callback has no
+possibility to influence the position.
+
+There are not really any simple examples for lexer callbacks, but you can see
+them in action e.g. in the `SMLLexer` class in `ml.py`_.
+
+.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py
+
+
+The ExtendedRegexLexer class
+============================
+
+The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
+the funky syntax rules of languages such as Ruby.
+
+But fear not; even then you don't have to abandon the regular expression
+approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
+All features known from RegexLexers are available here too, and the tokens are
+specified in exactly the same way, *except* for one detail:
+
+The `get_tokens_unprocessed()` method holds its internal state data not as local
+variables, but in an instance of the `pygments.lexer.LexerContext` class, and
+that instance is passed to callbacks as a third argument. This means that you
+can modify the lexer state in callbacks.
+
+The `LexerContext` class has the following members:
+
+* `text` -- the input text
+* `pos` -- the current starting position that is used for matching regexes
+* `stack` -- a list containing the state stack
+* `end` -- the maximum position to which regexes are matched, this defaults to
+ the length of `text`
+
+Additionally, the `get_tokens_unprocessed()` method can be given a
+`LexerContext` instead of a string and will then process this context instead of
+creating a new one for the string argument.
+
+Note that because you can set the current position to anything in the callback,
+it won't automatically be set by the caller after the callback is finished.
+For example, this is how the hypothetical lexer above would be written with the
+`ExtendedRegexLexer`::
+
+ from pygments.lexer import ExtendedRegexLexer
+ from pygments.token import Generic
+
+ class ExHypotheticLexer(ExtendedRegexLexer):
+
+ def headline_callback(lexer, match, ctx):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+ ctx.pos = match.end()
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+This might sound confusing (and it can really be). But it is needed, and for an
+example look at the Ruby lexer in `ruby.py`_.
+
+.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py
+
+
+Handling Lists of Keywords
+==========================
+
+For a relatively short list (hundreds) you can construct an optimized regular
+expression directly using ``words()`` (longer lists, see next section). This
+function handles a few things for you automatically, including escaping
+metacharacters and Python's first-match rather than longest-match in
+alternations. Feel free to put the lists themselves in
+``pygments/lexers/_$lang_builtins.py`` (see examples there), and generate them
+by code if possible.
+
+An example of using ``words()`` is something like::
+
+ from pygments.lexer import RegexLexer, words, Name
+
+ class MyLexer(RegexLexer):
+
+ tokens = {
+ 'root': [
+ (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+ (r'\w+', Name),
+ ],
+ }
+
+As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed
+regex.
+
+
+Modifying Token Streams
+=======================
+
+Some languages ship a lot of builtin functions (for example PHP). The total
+amount of those functions differs from system to system because not everybody
+has every extension installed. In the case of PHP there are over 3000 builtin
+functions. That's an incredibly huge amount of functions, much more than you
+want to put into a regular expression.
+
+But because only `Name` tokens can be function names this is solvable by
+overriding the ``get_tokens_unprocessed()`` method. The following lexer
+subclasses the `PythonLexer` so that it highlights some additional names as
+pseudo keywords::
+
+ from pygments.lexers.python import PythonLexer
+ from pygments.token import Name, Keyword
+
+ class MyPythonLexer(PythonLexer):
+ EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.EXTRA_KEYWORDS:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
+The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
diff --git a/doc/docs/lexers.rst b/doc/docs/lexers.rst
new file mode 100644
index 0000000..446c5a9
--- /dev/null
+++ b/doc/docs/lexers.rst
@@ -0,0 +1,69 @@
+.. -*- mode: rst -*-
+
+================
+Available lexers
+================
+
+This page lists all available builtin lexers and the options they take.
+
+Currently, **all lexers** support these options:
+
+`stripnl`
+ Strip leading and trailing newlines from the input (default: ``True``)
+
+`stripall`
+ Strip all leading and trailing whitespace from the input (default:
+ ``False``).
+
+`ensurenl`
+ Make sure that the input ends with a newline (default: ``True``). This
+ is required for some lexers that consume input linewise.
+
+ .. versionadded:: 1.3
+
+`tabsize`
+ If given and greater than 0, expand tabs in the input (default: ``0``).
+
+`encoding`
+ If given, must be an encoding name (such as ``"utf-8"``). This encoding
+ will be used to convert the input string to Unicode (if it is not already
+ a Unicode string). The default is ``"guess"``.
+
+ If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1
+ detection is used, if it is set to ``"chardet"``, the
+ `chardet library <https://chardet.github.io/>`_ is used to
+ guess the encoding of the input.
+
+ .. versionadded:: 0.6
+
+
+The "Short Names" field lists the identifiers that can be used with the
+`get_lexer_by_name()` function.
+
+These lexers are builtin and can be imported from `pygments.lexers`:
+
+.. pygmentsdoc:: lexers
+
+
+Iterating over all lexers
+-------------------------
+
+.. versionadded:: 0.6
+
+To get all lexers (both the builtin and the plugin ones), you can
+use the `get_all_lexers()` function from the `pygments.lexers`
+module:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.lexers import get_all_lexers
+ >>> i = get_all_lexers()
+ >>> next(i)
+ ('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch'))
+ >>> next(i)
+ ('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',))
+ >>> next(i)
+ ('XML+Ruby', ('xml+ruby', 'xml+erb'), (), ())
+
+As you can see, the return value is an iterator which yields tuples
+in the form ``(name, aliases, filetypes, mimetypes)``.
diff --git a/doc/docs/moinmoin.rst b/doc/docs/moinmoin.rst
new file mode 100644
index 0000000..80ed25c
--- /dev/null
+++ b/doc/docs/moinmoin.rst
@@ -0,0 +1,39 @@
+.. -*- mode: rst -*-
+
+============================
+Using Pygments with MoinMoin
+============================
+
+From Pygments 0.7, the source distribution ships a `Moin`_ parser plugin that
+can be used to get Pygments highlighting in Moin wiki pages.
+
+To use it, copy the file `external/moin-parser.py` from the Pygments
+distribution to the `data/plugin/parser` subdirectory of your Moin instance.
+Edit the options at the top of the file (currently ``ATTACHMENTS`` and
+``INLINESTYLES``) and rename the file to the name that the parser directive
+should have. For example, if you name the file ``code.py``, you can get a
+highlighted Python code sample with this Wiki markup::
+
+ {{{
+ #!code python
+ [...]
+ }}}
+
+where ``python`` is the Pygments name of the lexer to use.
+
+Additionally, if you set the ``ATTACHMENTS`` option to True, Pygments will also
+be called for all attachments for whose filenames there is no other parser
+registered.
+
+You are responsible for including CSS rules that will map the Pygments CSS
+classes to colors. You can output a stylesheet file with `pygmentize`, put it
+into the `htdocs` directory of your Moin instance and then include it in the
+`stylesheets` configuration option in the Moin config, e.g.::
+
+ stylesheets = [('screen', '/htdocs/pygments.css')]
+
+If you do not want to do that and are willing to accept larger HTML output, you
+can set the ``INLINESTYLES`` option to True.
+
+
+.. _Moin: https://moinmo.in/
diff --git a/doc/docs/plugins.rst b/doc/docs/plugins.rst
new file mode 100644
index 0000000..6738860
--- /dev/null
+++ b/doc/docs/plugins.rst
@@ -0,0 +1,122 @@
+=======
+Plugins
+=======
+
+If you want to extend Pygments without hacking the sources, but want to
+use the lexer/formatter/style/filter lookup functions (`lexers.get_lexer_by_name`
+et al.), you can use `setuptools`_ entrypoints to add new lexers, formatters
+or styles as if they were in the Pygments core.
+
+.. _setuptools: https://pypi.org/project/setuptools/
+
+That means you can use your highlighter modules with the `pygmentize` script,
+which relies on the mentioned functions.
+
+
+Plugin discovery
+================
+
+At runtime, discovering plugins is preferentially done using Python's
+standard library module `importlib.metadata`_, available in Python 3.8
+and higher. In earlier Python versions, Pygments attempts to use the
+`importlib_metadata`_ backport, if available. If not available, a
+fallback is attempted on the older `pkg_resources`_ module. Finally, if
+``pkg_resources`` is not available, no plugins will be loaded at
+all. Note that ``pkg_resources`` is distributed with `setuptools`_, and
+thus available on most Python environments. However, ``pkg_resources``
+is considerably slower than ``importlib.metadata`` or its
+``importlib_metadata`` backport. For this reason, if you run Pygments
+under Python older than 3.8, it is recommended to install
+``importlib-metadata``. Pygments defines a ``plugins`` packaging extra,
+so you can ensure it is installed with best plugin support (i.e., that
+``importlib-metadata`` is also installed in case you are running Python
+earlier than 3.8) by specifying ``pygments[plugins]`` as the
+requirement, for example, with ``pip``:
+
+.. sourcecode:: shell
+
+ $ python -m pip install --user pygments[plugins]
+
+.. _importlib.metadata: https://docs.python.org/3.10/library/importlib.metadata.html
+.. _importlib_metadata: https://pypi.org/project/importlib-metadata
+.. _pkg_resources: https://setuptools.pypa.io/en/latest/pkg_resources.html
+
+
+Defining plugins through entrypoints
+====================================
+
+Here is a list of setuptools entrypoints that Pygments understands:
+
+`pygments.lexers`
+
+ This entrypoint is used for adding new lexers to the Pygments core.
+ The name of the entrypoint values doesn't really matter, Pygments extracts
+ required metadata from the class definition:
+
+ .. sourcecode:: ini
+
+ [pygments.lexers]
+ yourlexer = yourmodule:YourLexer
+
+ Note that you have to define ``name``, ``aliases`` and ``filenames``
+ attributes so that you can use the highlighter from the command line:
+
+ .. sourcecode:: python
+
+ class YourLexer(...):
+ name = 'Name Of Your Lexer'
+ aliases = ['alias']
+ filenames = ['*.ext']
+
+
+`pygments.formatters`
+
+ You can use this entrypoint to add new formatters to Pygments. The
+ name of an entrypoint item is the name of the formatter. If you
+ prefix the name with a slash it's used as a filename pattern:
+
+ .. sourcecode:: ini
+
+ [pygments.formatters]
+ yourformatter = yourmodule:YourFormatter
+ /.ext = yourmodule:YourFormatter
+
+
+`pygments.styles`
+
+ To add a new style you can use this entrypoint. The name of the entrypoint
+ is the name of the style:
+
+ .. sourcecode:: ini
+
+ [pygments.styles]
+ yourstyle = yourmodule:YourStyle
+
+
+`pygments.filters`
+
+ Use this entrypoint to register a new filter. The name of the
+ entrypoint is the name of the filter:
+
+ .. sourcecode:: ini
+
+ [pygments.filters]
+ yourfilter = yourmodule:YourFilter
+
+
+How To Use Entrypoints
+======================
+
+This documentation doesn't explain how to use those entrypoints because this is
+covered in the `setuptools documentation`_. That page should cover everything
+you need to write a plugin.
+
+.. _setuptools documentation: https://setuptools.readthedocs.io/en/latest/
+
+
+Extending The Core
+==================
+
+If you have written a Pygments plugin that is open source, please inform us
+about that. There is a high chance that we'll add it to the Pygments
+distribution.
diff --git a/doc/docs/quickstart.rst b/doc/docs/quickstart.rst
new file mode 100644
index 0000000..b2a9c29
--- /dev/null
+++ b/doc/docs/quickstart.rst
@@ -0,0 +1,205 @@
+.. -*- mode: rst -*-
+
+===========================
+Introduction and Quickstart
+===========================
+
+
+Welcome to Pygments! This document explains the basic concepts and terms and
+gives a few examples of how to use the library.
+
+
+Architecture
+============
+
+There are four types of components that work together highlighting a piece of
+code:
+
+* A **lexer** splits the source into tokens, fragments of the source that
+ have a token type that determines what the text represents semantically
+ (e.g., keyword, string, or comment). There is a lexer for every language
+ or markup format that Pygments supports.
+* The token stream can be piped through **filters**, which usually modify
+ the token types or text fragments, e.g. uppercasing all keywords.
+* A **formatter** then takes the token stream and writes it to an output
+ file, in a format such as HTML, LaTeX or RTF.
+* While writing the output, a **style** determines how to highlight all the
+ different token types. It maps them to attributes like "red and bold".
+
+
+Example
+=======
+
+Here is a small example for highlighting Python code:
+
+.. sourcecode:: python
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+ from pygments.formatters import HtmlFormatter
+
+ code = 'print "Hello World"'
+ print(highlight(code, PythonLexer(), HtmlFormatter()))
+
+which prints something like this:
+
+.. sourcecode:: html
+
+ <div class="highlight">
+ <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
+ </div>
+
+As you can see, Pygments uses CSS classes (by default, but you can change that)
+instead of inline styles in order to avoid outputting redundant style information over
+and over. A CSS stylesheet that contains all CSS classes possibly used in the output
+can be produced by:
+
+.. sourcecode:: python
+
+ print(HtmlFormatter().get_style_defs('.highlight'))
+
+The argument to :func:`get_style_defs` is used as an additional CSS selector:
+the output may look like this:
+
+.. sourcecode:: css
+
+ .highlight .k { color: #AA22FF; font-weight: bold }
+ .highlight .s { color: #BB4444 }
+ ...
+
+
+Options
+=======
+
+The :func:`highlight()` function supports a fourth argument called *outfile*;
+it must be a file object if given. The formatted output will then be written to
+this file instead of being returned as a string.
+
+Lexers and formatters both support options. They are given to them as keyword
+arguments either to the class or to the lookup method:
+
+.. sourcecode:: python
+
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name
+ from pygments.formatters import HtmlFormatter
+
+ lexer = get_lexer_by_name("python", stripall=True)
+ formatter = HtmlFormatter(linenos=True, cssclass="source")
+ result = highlight(code, lexer, formatter)
+
+This makes the lexer strip all leading and trailing whitespace from the input
+(`stripall` option), lets the formatter output line numbers (`linenos` option),
+and sets the wrapping ``<div>``'s class to ``source`` (instead of
+``highlight``).
+
+Important options include:
+
+`encoding` : for lexers and formatters
+ Since Pygments uses Unicode strings internally, this determines which
+ encoding will be used to convert to or from byte strings.
+`style` : for formatters
+ The name of the style to use when writing the output.
+
+
+For an overview of builtin lexers and formatters and their options, visit the
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
+
+For documentation on filters, see :doc:`this page <filters>`.
+
+
+Lexer and formatter lookup
+==========================
+
+If you want to lookup a built-in lexer by its alias or a filename, you can use
+one of the following methods:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.lexers import (get_lexer_by_name,
+ ... get_lexer_for_filename, get_lexer_for_mimetype)
+
+ >>> get_lexer_by_name('python')
+ <pygments.lexers.PythonLexer>
+
+ >>> get_lexer_for_filename('spam.rb')
+ <pygments.lexers.RubyLexer>
+
+ >>> get_lexer_for_mimetype('text/x-perl')
+ <pygments.lexers.PerlLexer>
+
+All these functions accept keyword arguments; they will be passed to the lexer
+as options.
+
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
+
+
+Guessing lexers
+===============
+
+If you don't know the content of the file, or you want to highlight a file
+whose extension is ambiguous, such as ``.html`` (which could contain plain HTML
+or some template tags), use these functions:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.lexers import guess_lexer, guess_lexer_for_filename
+
+ >>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
+ <pygments.lexers.PythonLexer>
+
+ >>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
+ <pygments.lexers.PythonLexer>
+
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
+
+All lexers have two different filename pattern lists: the primary and the
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers
+and look at the primary and secondary filename patterns if the filename matches.
+If only one lexer matches, it is returned, else the guessing mechanism of
+:func:`.guess_lexer()` is used with the matching lexers.
+
+As usual, keyword arguments to these functions are given to the created lexer
+as options.
+
+
+Command line usage
+==================
+
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
+
+ $ pygmentize test.py
+
+will highlight the Python file test.py using ANSI escape sequences
+(a.k.a. terminal colors) and print the result to standard output.
+
+To output HTML, use the ``-f`` option::
+
+ $ pygmentize -f html -o test.html test.py
+
+to write an HTML-highlighted version of test.py to the file test.html.
+Note that it will only be a snippet of HTML, if you want a full HTML document,
+use the "full" option::
+
+ $ pygmentize -f html -O full -o test.html test.py
+
+This will produce a full HTML document with included stylesheet.
+
+A style can be selected with ``-O style=<name>``.
+
+If you need a stylesheet for an existing HTML file using Pygments CSS classes,
+it can be created with::
+
+ $ pygmentize -S default -f html > style.css
+
+where ``default`` is the style name.
+
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
diff --git a/doc/docs/rstdirective.rst b/doc/docs/rstdirective.rst
new file mode 100644
index 0000000..edc117d
--- /dev/null
+++ b/doc/docs/rstdirective.rst
@@ -0,0 +1,22 @@
+.. -*- mode: rst -*-
+
+================================
+Using Pygments in ReST documents
+================================
+
+Many Python people use `ReST`_ for documenting their sourcecode, programs,
+scripts et cetera. This also means that documentation often includes sourcecode
+samples or snippets.
+
+You can easily enable Pygments support for your ReST texts using a custom
+directive -- this is also how this documentation displays source code.
+
+From Pygments 0.9, the directive is shipped in the distribution as
+`external/rst-directive.py`. You can copy and adapt this code to your liking.
+
+.. removed -- too confusing
+ *Loosely related note:* The ReST lexer now recognizes ``.. sourcecode::`` and
+ ``.. code::`` directives and highlights the contents in the specified language
+ if the `handlecodeblocks` option is true.
+
+.. _ReST: https://docutils.sourceforge.io/rst.html
diff --git a/doc/docs/security.rst b/doc/docs/security.rst
new file mode 100644
index 0000000..72f2d05
--- /dev/null
+++ b/doc/docs/security.rst
@@ -0,0 +1,31 @@
+Security considerations
+-----------------------
+
+Pygments provides no guarantees on execution time, which needs to be taken
+into consideration when using Pygments to process arbitrary user inputs. For
+example, if you have a web service which uses Pygments for highlighting, there
+may be inputs which will cause the Pygments process to run "forever" and/or use
+significant amounts of memory. This can subsequently be used to perform a
+remote denial-of-service attack on the server if the processes are not
+terminated quickly.
+
+Unfortunately, it's practically impossible to harden Pygments itself against
+those issues: Some regular expressions can result in "catastrophic
+backtracking", but other bugs like incorrect matchers can also
+cause similar problems, and there is no way to find them in an automated fashion
+(short of solving the halting problem.) Pygments has extensive unit tests,
+automated randomized testing, and is also tested by `OSS-Fuzz <https://github.com/google/oss-fuzz/tree/master/projects/pygments>`_,
+but we will never be able to eliminate all bugs in this area.
+
+Our recommendations are:
+
+* Ensure that the Pygments process is *terminated* after a reasonably short
+ timeout. In general Pygments should take seconds at most for reasonably-sized
+ input.
+* *Limit* the number of concurrent Pygments processes to avoid oversubscription
+ of resources.
+
+The Pygments authors will treat any bug resulting in long processing times with
+high priority -- it's one of those things that will be fixed in a patch release.
+When reporting a bug where you suspect super-linear execution times, please make
+sure to attach an input to reproduce it. \ No newline at end of file
diff --git a/doc/docs/styledevelopment.rst b/doc/docs/styledevelopment.rst
new file mode 100644
index 0000000..8c4ec2d
--- /dev/null
+++ b/doc/docs/styledevelopment.rst
@@ -0,0 +1,96 @@
+.. -*- mode: rst -*-
+
+.. _creating-own-styles:
+
+Creating Own Styles
+===================
+
+So, how to create a style? All you have to do is to subclass `Style` and
+define some styles:
+
+.. sourcecode:: python
+
+ from pygments.style import Style
+ from pygments.token import Token, Comment, Keyword, Name, String, \
+ Error, Generic, Number, Operator
+
+
+ class YourStyle(Style):
+
+ styles = {
+ Token: '',
+ Comment: 'italic #888',
+ Keyword: 'bold #005',
+ Name: '#f00',
+ Name.Class: 'bold #0f0',
+ Name.Function: '#0f0',
+ String: 'bg:#eee #111'
+ }
+
+That's it, save it as ``your.py``. There are just a few rules. When you define a style for `Name`
+the style automatically also affects `Name.Function` and so on. If you
+defined ``'bold'`` and you don't want boldface for a subtoken use ``'nobold'``.
+
+(Philosophy: the styles aren't written in CSS syntax since this way
+they can be used for a variety of formatters.)
+
+``Token`` is the default style inherited by all token types.
+
+To make the style usable for Pygments, you must
+
+* either register it as a plugin (see :doc:`the plugin docs <plugins>`)
+* or update the ``pygments.styles`` subpackage directory. For example:
+
+ * add ``your.py`` file
+ * register the new style by adding a line to the ``__init__.py`` file:
+
+ .. sourcecode:: python
+
+ STYLE_MAP = {
+ ...
+ 'your': 'your::YourStyle',
+
+.. note::
+
+ You should *only* add it to the ``pygments.styles`` subdirectory if you are
+ working on a contribution to Pygments. You should not use that
+ method to extend an already existing copy of Pygments, use the plugins
+ mechanism for that.
+
+
+Style Rules
+===========
+
+Here is a small overview of all allowed styles:
+
+``bold``
+ render text as bold
+``nobold``
+ don't render text as bold (to prevent subtokens being highlighted bold)
+``italic``
+ render text italic
+``noitalic``
+ don't render text as italic
+``underline``
+ render text underlined
+``nounderline``
+ don't render text underlined
+``bg:``
+ transparent background
+``bg:#000000``
+ background color (black)
+``border:``
+ no border
+``border:#ffffff``
+ border color (white)
+``#ff0000``
+ text color (red)
+``noinherit``
+ don't inherit styles from supertoken
+
+Note that there may not be a space between ``bg:`` and the color value
+since the style definition string is split at whitespace.
+Also, using named colors is not allowed since the supported color names
+vary for different formatters.
+
+Furthermore, not all lexers might support every style.
diff --git a/doc/docs/styles.rst b/doc/docs/styles.rst
new file mode 100644
index 0000000..91689d3
--- /dev/null
+++ b/doc/docs/styles.rst
@@ -0,0 +1,157 @@
+.. -*- mode: rst -*-
+
+======
+Styles
+======
+
+Pygments comes with :doc:`some builtin styles </styles/>` that work for both the
+HTML and LaTeX formatter.
+
+The builtin styles can be looked up with the `get_style_by_name` function:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.styles import get_style_by_name
+ >>> get_style_by_name('colorful')
+ <class 'pygments.styles.colorful.ColorfulStyle'>
+
+You can pass an instance of a `Style` class to a formatter as the `style`
+option in the form of a string:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.styles import get_style_by_name
+ >>> from pygments.formatters import HtmlFormatter
+ >>> HtmlFormatter(style='colorful').style
+ <class 'pygments.styles.colorful.ColorfulStyle'>
+
+Or you can also import your own style (which must be a subclass of
+`pygments.style.Style`) and pass it to the formatter:
+
+.. sourcecode:: pycon
+
+ >>> from yourapp.yourmodule import YourStyle
+ >>> from pygments.formatters import HtmlFormatter
+ >>> HtmlFormatter(style=YourStyle).style
+ <class 'yourapp.yourmodule.YourStyle'>
+
+
+Creating Own Styles
+===================
+
+See :ref:`creating-own-styles`.
+
+
+Builtin Styles
+==============
+
+Pygments ships some builtin styles which are maintained by the Pygments team.
+
+To get a list of known styles you can use this snippet:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.styles import STYLE_MAP
+ >>> STYLE_MAP.keys()
+ ['default', 'emacs', 'friendly', 'colorful']
+
+
+Getting a list of available styles
+==================================
+
+.. versionadded:: 0.6
+
+Because it could be that a plugin registered a style, there is
+a way to iterate over all styles:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.styles import get_all_styles
+ >>> styles = list(get_all_styles())
+
+
+.. _AnsiTerminalStyle:
+
+Terminal Styles
+===============
+
+.. versionadded:: 2.2
+
+Custom styles used with the 256-color terminal formatter can also map colors to
+use the 8 default ANSI colors. To do so, use ``ansigreen``, ``ansibrightred`` or
+any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI
+colors will be mapped to the corresponding `escape codes 30 to 37
+<https://en.wikipedia.org/wiki/ANSI_escape_code#Colors>`_ thus respecting any
+custom color mapping and themes provided by many terminal emulators. Light
+variants are treated as foreground color with an added bold flag.
+``bg:ansi<color>`` will also be respected, except the light variant will be the
+same shade as their dark variant.
+
+See the following example where the color of the string ``"Hello World"`` is
+governed by the escape sequence ``\x1b[34;41;01m`` (ANSI bright blue, bold, 41 being
+red background) instead of an extended foreground & background color.
+
+.. sourcecode:: pycon
+
+ >>> from pygments import highlight
+ >>> from pygments.style import Style
+ >>> from pygments.token import Token
+ >>> from pygments.lexers import Python3Lexer
+ >>> from pygments.formatters import Terminal256Formatter
+
+ >>> class MyStyle(Style):
+ styles = {
+ Token.String: 'ansibrightblue bg:ansibrightred',
+ }
+
+ >>> code = 'print("Hello World")'
+ >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle))
+ >>> print(result.encode())
+ b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m'
+
+Colors specified using ``ansi*`` are converted to a default set of RGB colors
+when used with formatters other than the terminal-256 formatter.
+
+By definition of ANSI, the following colors are considered "light" colors, and
+will be rendered by most terminals as bold:
+
+- "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow", "brightblue",
+ "brightmagenta", "brightcyan", "white"
+
+The following are considered "dark" colors and will be rendered as non-bold:
+
+- "black", "red", "green", "yellow", "blue", "magenta", "cyan",
+ "gray"
+
+Exact behavior might depend on the terminal emulator you are using, and its
+settings.
+
+.. _new-ansi-color-names:
+
+.. versionchanged:: 2.4
+
+The definition of the ANSI color names has changed.
+New names are easier to understand and align to the colors used in other projects.
+
+===================== ====================
+New names Pygments up to 2.3
+===================== ====================
+``ansiblack`` ``#ansiblack``
+``ansired`` ``#ansidarkred``
+``ansigreen`` ``#ansidarkgreen``
+``ansiyellow`` ``#ansibrown``
+``ansiblue`` ``#ansidarkblue``
+``ansimagenta`` ``#ansipurple``
+``ansicyan`` ``#ansiteal``
+``ansigray`` ``#ansilightgray``
+``ansibrightblack`` ``#ansidarkgray``
+``ansibrightred`` ``#ansired``
+``ansibrightgreen`` ``#ansigreen``
+``ansibrightyellow`` ``#ansiyellow``
+``ansibrightblue`` ``#ansiblue``
+``ansibrightmagenta`` ``#ansifuchsia``
+``ansibrightcyan`` ``#ansiturquoise``
+``ansiwhite`` ``#ansiwhite``
+===================== ====================
+
+Old ANSI color names are deprecated but will still work.
diff --git a/doc/docs/terminal-sessions.rst b/doc/docs/terminal-sessions.rst
new file mode 100644
index 0000000..45af0eb
--- /dev/null
+++ b/doc/docs/terminal-sessions.rst
@@ -0,0 +1,46 @@
+Interactive terminal/shell sessions
+-----------------------------------
+
+To highlight an interactive terminal or shell session, prefix your code snippet
+with a specially formatted prompt.
+
+Supported shells with examples are shown below. In each example, prompt parts in
+brackets ``[any]`` represent optional parts of the prompt, and prompt parts
+without brackets or in parenthesis ``(any)`` represent required parts of the
+prompt.
+
+* **Bash Session** (console, shell-session):
+
+ .. code-block:: console
+
+ [any@any]$ ls -lh
+ [any@any]# ls -lh
+ [any@any]% ls -lh
+ $ ls -lh
+ # ls -lh
+ % ls -lh
+ > ls -lh
+
+* **MSDOS Session** (doscon):
+
+ .. code-block:: doscon
+
+ [any]> dir
+ > dir
+ More? dir
+
+* **Tcsh Session** (tcshcon):
+
+ .. code-block:: tcshcon
+
+ (any)> ls -lh
+ ? ls -lh
+
+* **PowerShell Session** (ps1con):
+
+ .. code-block:: ps1con
+
+ PS[any]> Get-ChildItem
+ PS> Get-ChildItem
+ >> Get-ChildItem
+
diff --git a/doc/docs/tokens.rst b/doc/docs/tokens.rst
new file mode 100644
index 0000000..0bc7586
--- /dev/null
+++ b/doc/docs/tokens.rst
@@ -0,0 +1,376 @@
+.. -*- mode: rst -*-
+
+==============
+Builtin Tokens
+==============
+
+.. module:: pygments.token
+
+In the :mod:`pygments.token` module, there is a special object called `Token`
+that is used to create token types.
+
+You can create a new token type by accessing an attribute of `Token` whose
+name starts with an uppercase letter:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.token import Token
+ >>> Token.String
+ Token.String
+ >>> Token.String is Token.String
+ True
+
+Note that tokens are singletons so you can use the ``is`` operator for comparing
+token types.
+
+You can also use the ``in`` operator to perform set tests:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.token import Comment
+ >>> Comment.Single in Comment
+ True
+ >>> Comment in Comment.Multi
+ False
+
+This can be useful in :doc:`filters <filters>` and if you write lexers on your
+own without using the base lexers.
+
+You can also split a token type into a hierarchy, and get the parent of it:
+
+.. sourcecode:: pycon
+
+ >>> String.split()
+ [Token, Token.Literal, Token.Literal.String]
+ >>> String.parent
+ Token.Literal
+
+In principle, you can create an unlimited number of token types but nobody can
+guarantee that a style would define style rules for a token type. Because of
+that, Pygments proposes some global token types defined in the
+`pygments.token.STANDARD_TYPES` dict.
+
+For some tokens aliases are already defined:
+
+.. sourcecode:: pycon
+
+ >>> from pygments.token import String
+ >>> String
+ Token.Literal.String
+
+Inside the :mod:`pygments.token` module the following aliases are defined:
+
+============= ============================ ====================================
+`Text` `Token.Text` for any type of text data
+`Whitespace` `Token.Text.Whitespace` for whitespace
+`Error` `Token.Error` represents lexer errors
+`Other` `Token.Other` special token for data not
+ matched by a parser (e.g. HTML
+ markup in PHP code)
+`Keyword` `Token.Keyword` any kind of keywords
+`Name` `Token.Name` variable/function names
+`Literal` `Token.Literal` Any literals
+`String` `Token.Literal.String` string literals
+`Number` `Token.Literal.Number` number literals
+`Operator` `Token.Operator` operators (``+``, ``not``...)
+`Punctuation` `Token.Punctuation` punctuation (``[``, ``(``...)
+`Comment` `Token.Comment` any kind of comments
+`Generic` `Token.Generic` generic tokens (have a look at
+ the explanation below)
+============= ============================ ====================================
+
+Normally you just create token types using the already defined aliases. For each
+of those token aliases, a number of subtypes exist (excluding the special tokens
+`Token.Text`, `Token.Error` and `Token.Other`)
+
+It's also possible to convert strings to token types (for example
+if you want to supply a token from the command line):
+
+.. sourcecode:: pycon
+
+ >>> from pygments.token import String, string_to_tokentype
+ >>> string_to_tokentype("String")
+ Token.Literal.String
+ >>> string_to_tokentype("Token.Literal.String")
+ Token.Literal.String
+ >>> string_to_tokentype(String)
+ Token.Literal.String
+
+
+Keyword Tokens
+==============
+
+`Keyword`
+ For any kind of keyword (especially if it doesn't match any of the
+ subtypes of course).
+
+`Keyword.Constant`
+ For keywords that are constants (e.g. ``None`` in future Python versions).
+
+`Keyword.Declaration`
+ For keywords used for variable declaration (e.g. ``var`` in some programming
+ languages like JavaScript).
+
+`Keyword.Namespace`
+ For keywords used for namespace declarations (e.g. ``import`` in Python and
+ Java and ``package`` in Java).
+
+`Keyword.Pseudo`
+ For keywords that aren't really keywords (e.g. ``None`` in old Python
+ versions).
+
+`Keyword.Reserved`
+ For reserved keywords.
+
+`Keyword.Type`
+ For builtin types that can't be used as identifiers (e.g. ``int``,
+ ``char`` etc. in C).
+
+
+Name Tokens
+===========
+
+`Name`
+ For any name (variable names, function names, classes).
+
+`Name.Attribute`
+ For all attributes (e.g. in HTML tags).
+
+`Name.Builtin`
+ Builtin names; names that are available in the global namespace.
+
+`Name.Builtin.Pseudo`
+ Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java).
+
+`Name.Class`
+ Class names. Because no lexer can know if a name is a class or a function
+ or something else this token is meant for class declarations.
+
+`Name.Constant`
+ Token type for constants. In some languages you can recognise a token by the
+ way it's defined (the value after a ``const`` keyword for example). In
+ other languages constants are uppercase by definition (Ruby).
+
+`Name.Decorator`
+ Token type for decorators. Decorators are syntactic elements in the Python
+ language. Similar syntax elements exist in C# and Java.
+
+`Name.Entity`
+ Token type for special entities. (e.g. ``&nbsp;`` in HTML).
+
+`Name.Exception`
+ Token type for exception names (e.g. ``RuntimeError`` in Python). Some languages
+ define exceptions in the function signature (Java). You can highlight
+ the name of that exception using this token then.
+
+`Name.Function`
+ Token type for function names.
+
+`Name.Function.Magic`
+ same as `Name.Function` but for special function names that have an implicit use
+ in a language (e.g. ``__init__`` method in Python).
+
+`Name.Label`
+ Token type for label names (e.g. in languages that support ``goto``).
+
+`Name.Namespace`
+ Token type for namespaces. (e.g. import paths in Java/Python), names following
+ the ``module``/``namespace`` keyword in other languages.
+
+`Name.Other`
+ Other names. Normally unused.
+
+`Name.Property`
+ Additional token type occasionally used for class attributes.
+
+`Name.Tag`
+ Tag names (in HTML/XML markup or configuration files).
+
+`Name.Variable`
+ Token type for variables. Some languages have prefixes for variable names
+ (PHP, Ruby, Perl). You can highlight them using this token.
+
+`Name.Variable.Class`
+ same as `Name.Variable` but for class variables (also static variables).
+
+`Name.Variable.Global`
+ same as `Name.Variable` but for global variables (used in Ruby, for
+ example).
+
+`Name.Variable.Instance`
+ same as `Name.Variable` but for instance variables.
+
+`Name.Variable.Magic`
+ same as `Name.Variable` but for special variable names that have an implicit use
+ in a language (e.g. ``__doc__`` in Python).
+
+
+Literals
+========
+
+`Literal`
+ For any literal (if not further defined).
+
+`Literal.Date`
+ for date literals (e.g. ``42d`` in Boo).
+
+
+`String`
+ For any string literal.
+
+`String.Affix`
+ Token type for affixes that further specify the type of the string they're
+ attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``).
+
+`String.Backtick`
+ Token type for strings enclosed in backticks.
+
+`String.Char`
+ Token type for single characters (e.g. Java, C).
+
+`String.Delimiter`
+ Token type for delimiting identifiers in "heredoc", raw and other similar
+ strings (e.g. the word ``END`` in Perl code ``print <<'END';``).
+
+`String.Doc`
+ Token type for documentation strings (for example Python).
+
+`String.Double`
+ Double quoted strings.
+
+`String.Escape`
+ Token type for escape sequences in strings.
+
+`String.Heredoc`
+ Token type for "heredoc" strings (e.g. in Ruby or Perl).
+
+`String.Interpol`
+ Token type for interpolated parts in strings (e.g. ``#{foo}`` in Ruby).
+
+`String.Other`
+ Token type for any other strings (for example ``%q{foo}`` string constructs
+ in Ruby).
+
+`String.Regex`
+ Token type for regular expression literals (e.g. ``/foo/`` in JavaScript).
+
+`String.Single`
+ Token type for single quoted strings.
+
+`String.Symbol`
+ Token type for symbols (e.g. ``:foo`` in LISP or Ruby).
+
+
+`Number`
+ Token type for any number literal.
+
+`Number.Bin`
+ Token type for binary literals (e.g. ``0b101010``).
+
+`Number.Float`
+ Token type for float literals (e.g. ``42.0``).
+
+`Number.Hex`
+ Token type for hexadecimal number literals (e.g. ``0xdeadbeef``).
+
+`Number.Integer`
+ Token type for integer literals (e.g. ``42``).
+
+`Number.Integer.Long`
+ Token type for long integer literals (e.g. ``42L`` in Python).
+
+`Number.Oct`
+ Token type for octal literals.
+
+
+Operators
+=========
+
+`Operator`
+ For any punctuation operator (e.g. ``+``, ``-``).
+
+`Operator.Word`
+ For any operator that is a word (e.g. ``not``).
+
+
+Punctuation
+===========
+
+.. versionadded:: 0.7
+
+`Punctuation`
+ For any punctuation which is not an operator (e.g. ``[``, ``(``...)
+
+`Punctuation.Marker`
+ For markers that point to a location (e.g., carets in Python
+ tracebacks for syntax errors).
+
+ .. versionadded:: 2.10
+
+
+Comments
+========
+
+`Comment`
+ Token type for any comment.
+
+`Comment.Hashbang`
+ Token type for hashbang comments (i.e. first lines of files that start with
+ ``#!``).
+
+`Comment.Multiline`
+ Token type for multiline comments.
+
+`Comment.Preproc`
+ Token type for preprocessor comments (also ``<?php``/``<%`` constructs).
+
+`Comment.PreprocFile`
+ Token type for filenames in preprocessor comments, such as include files in C/C++.
+
+`Comment.Single`
+ Token type for comments that end at the end of a line (e.g. ``# foo``).
+
+`Comment.Special`
+ Special data in comments. For example code tags, author and license
+ information, etc.
+
+
+Generic Tokens
+==============
+
+Generic tokens are for special lexers like the `DiffLexer` that doesn't really
+highlight a programming language but a patch file.
+
+
+`Generic`
+ A generic, unstyled token. Normally you don't use this token type.
+
+`Generic.Deleted`
+ Marks the token value as deleted.
+
+`Generic.Emph`
+ Marks the token value as emphasized.
+
+`Generic.Error`
+ Marks the token value as an error message.
+
+`Generic.Heading`
+ Marks the token value as headline.
+
+`Generic.Inserted`
+ Marks the token value as inserted.
+
+`Generic.Output`
+ Marks the token value as program output (e.g. for python cli lexer).
+
+`Generic.Prompt`
+ Marks the token value as command prompt (e.g. bash lexer).
+
+`Generic.Strong`
+ Marks the token value as bold (e.g. for rst lexer).
+
+`Generic.Subheading`
+ Marks the token value as subheadline.
+
+`Generic.Traceback`
+ Marks the token value as a part of an error traceback.
diff --git a/doc/docs/unicode.rst b/doc/docs/unicode.rst
new file mode 100644
index 0000000..dca9111
--- /dev/null
+++ b/doc/docs/unicode.rst
@@ -0,0 +1,58 @@
+=====================
+Unicode and Encodings
+=====================
+
+Since Pygments 0.6, all lexers use unicode strings internally. Because of that
+you might encounter the occasional :exc:`UnicodeDecodeError` if you pass strings
+with the wrong encoding.
+
+By default, all lexers have their input encoding set to `guess`. This means
+that the following encodings are tried:
+
+* UTF-8 (including BOM handling)
+* The locale encoding (i.e. the result of `locale.getpreferredencoding()`)
+* As a last resort, `latin1`
+
+If you pass a lexer a byte string object (not unicode), it tries to decode the
+data using this encoding.
+
+You can override the encoding using the `encoding` or `inencoding` lexer
+options. If you have the `chardet`_ library installed and set the encoding to
+``chardet`` it will analyse the text and use the encoding it thinks is the
+right one automatically:
+
+.. sourcecode:: python
+
+ from pygments.lexers import PythonLexer
+ lexer = PythonLexer(encoding='chardet')
+
+The best way is to pass Pygments unicode objects. In that case you can't get
+unexpected output.
+
+The formatters now send Unicode objects to the stream if you don't set the
+output encoding. You can do so by passing the formatters an `encoding` option:
+
+.. sourcecode:: python
+
+ from pygments.formatters import HtmlFormatter
+ f = HtmlFormatter(encoding='utf-8')
+
+**You will have to set this option if you have non-ASCII characters in the
+source and the output stream does not accept Unicode written to it!**
+This is the case for all regular files and for terminals.
+
+Note: The Terminal formatter tries to be smart: if its output stream has an
+`encoding` attribute, and you haven't set the option, it will encode any
+Unicode string with this encoding before writing it. This is the case for
+`sys.stdout`, for example. The other formatters don't have that behavior.
+
+Another note: If you call Pygments via the command line (`pygmentize`),
+encoding is handled differently, see :doc:`the command line docs <cmdline>`.
+
+.. versionadded:: 0.7
+ The formatters now also accept an `outencoding` option which will override
+ the `encoding` option if given. This makes it possible to use a single
+ options dict with lexers and formatters, and still have different input and
+ output encodings.
+
+.. _chardet: https://chardet.github.io/
diff --git a/doc/download.rst b/doc/download.rst
new file mode 100644
index 0000000..7ac0868
--- /dev/null
+++ b/doc/download.rst
@@ -0,0 +1,39 @@
+Download and installation
+=========================
+
+The current release is version |version|.
+
+Packaged versions
+-----------------
+
+You can download it `from the Python Package Index
+<https://pypi.python.org/pypi/Pygments>`_. For installation of packages from
+PyPI, we recommend `Pip <https://www.pip-installer.org>`_, which works on all
+major platforms.
+
+Under Linux, most distributions include a package for Pygments, usually called
+``pygments`` or ``python-pygments``. You can install it with the package
+manager as usual.
+
+Development sources
+-------------------
+
+We're using the Git version control system. You can get the development source
+using this command::
+
+ git clone https://github.com/pygments/pygments
+
+Development takes place at `GitHub <https://github.com/pygments/pygments>`_.
+
+The latest changes in the development source code are listed in the `changelog
+<https://github.com/pygments/pygments/blob/master/CHANGES>`_.
+
+.. Documentation
+ -------------
+
+.. XXX todo
+
+ You can download the <a href="/docs/">documentation</a> either as
+ a bunch of rst files from the Git repository, see above, or
+ as a tar.gz containing rendered HTML files:</p>
+ <p><a href="/docs/download/pygmentsdocs.tar.gz">pygmentsdocs.tar.gz</a></p>
diff --git a/doc/examples/example.py b/doc/examples/example.py
new file mode 100644
index 0000000..6c9e2f1
--- /dev/null
+++ b/doc/examples/example.py
@@ -0,0 +1,14 @@
+from typing import Iterator
+
+# This is an example
+class Math:
+ @staticmethod
+ def fib(n: int) -> Iterator[int]:
+ """Fibonacci series up to n."""
+ a, b = 0, 1
+ while a < n:
+ yield a
+ a, b = b, a + b
+
+result = sum(Math.fib(42))
+print("The answer is {}".format(result))
diff --git a/doc/faq.rst b/doc/faq.rst
new file mode 100644
index 0000000..4e078dc
--- /dev/null
+++ b/doc/faq.rst
@@ -0,0 +1,142 @@
+:orphan:
+
+Pygments FAQ
+=============
+
+What is Pygments?
+-----------------
+
+Pygments is a syntax highlighting engine written in Python. That means, it will
+take source code (or other markup) in a supported language and output a
+processed version (in different formats) containing syntax highlighting markup.
+
+Its features include:
+
+* a wide range of common :doc:`languages and markup formats <languages>` is supported
+* new languages and formats are added easily
+* a number of output formats is available, including:
+
+ - HTML
+ - ANSI sequences (console output)
+ - LaTeX
+ - RTF
+
+* it is usable as a command-line tool and as a library
+* parsing and formatting is fast
+
+Pygments is licensed under the BSD license.
+
+Where does the name Pygments come from?
+---------------------------------------
+
+*Py* of course stands for Python, while *pigments* are used for coloring paint,
+and in this case, source code!
+
+What are the system requirements?
+---------------------------------
+
+Pygments only needs a standard Python install, version 3.6 or higher. No
+additional libraries are needed.
+
+How can I use Pygments?
+-----------------------
+
+Pygments is usable as a command-line tool as well as a library.
+
+From the command-line, usage looks like this (assuming the pygmentize script is
+properly installed)::
+
+ pygmentize -f html /path/to/file.py
+
+This will print an HTML-highlighted version of /path/to/file.py to standard output.
+
+For a complete help, please run ``pygmentize -h``.
+
+Usage as a library is thoroughly demonstrated in the Documentation section.
+
+How do I make a new style?
+--------------------------
+
+Please see the :doc:`documentation on styles <docs/styles>`.
+
+How can I report a bug or suggest a feature?
+--------------------------------------------
+
+Please report bugs and feature wishes in the tracker at GitHub.
+
+You can also e-mail the authors, see the contact details.
+
+I want support for this language!
+--------------------------------------
+
+Instead of waiting for others to include language support, why not write it
+yourself? All you have to know is :doc:`outlined in the docs
+<docs/lexerdevelopment>`.
+
+Can I use Pygments for programming language processing?
+-------------------------------------------------------
+
+The Pygments lexing machinery is quite powerful and can be used to build lexers for
+basically all languages. However, parsing them is not possible, though some
+lexers go some steps in this direction in order to e.g. highlight function names
+differently.
+
+Also, error reporting is not the scope of Pygments. It focuses on correctly
+highlighting syntactically valid documents, not finding and compensating errors.
+
+Who uses Pygments?
+------------------
+
+This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
+
+* `Wikipedia <https://en.wikipedia.org/>`_
+* `BitBucket <https://bitbucket.org/>`_, a Mercurial and Git hosting site
+* `The Sphinx documentation builder <https://sphinx-doc.org/>`_, for embedded source examples
+* `rst2pdf <https://github.com/ralsina/rst2pdf>`_, a reStructuredText to PDF converter
+* `Codecov <https://codecov.io/>`_, a code coverage CI service
+* `Trac <https://trac.edgewall.org/>`_, the universal project management tool
+* `AsciiDoc <https://www.methods.co.nz/asciidoc/>`_, a text-based documentation generator
+* `ActiveState Code <https://code.activestate.com/>`_, the Python Cookbook successor
+* `ViewVC <http://viewvc.org/>`_, a web-based version control repository browser
+* `BzrFruit <https://repo.or.cz/w/bzrfruit.git>`_, a Bazaar branch viewer
+* `QBzr <http://bazaar-vcs.org/QBzr>`_, a cross-platform Qt-based GUI front end for Bazaar
+* `Review Board <https://www.reviewboard.org/>`_, a collaborative code reviewing tool
+* `Diamanda <https://code.google.com/archive/p/diamanda/>`_, a Django powered wiki system with support for Pygments
+* `Progopedia <http://progopedia.ru/>`_ (`English <http://progopedia.com/>`_),
+ an encyclopedia of programming languages
+* `Bruce <https://sites.google.com/site/r1chardj0n3s/bruce>`_, a reStructuredText presentation tool
+* `PIDA <http://pida.co.uk/>`_, a universal IDE written in Python
+* `BPython <https://bpython-interpreter.org/>`_, a curses-based intelligent Python shell
+* `PuDB <https://pypi.org/project/pudb/>`_, a console Python debugger
+* `XWiki <https://www.xwiki.org/>`_, a wiki-based development framework in Java, using Jython
+* `roux <http://ananelson.com/software/roux/>`_, a script for running R scripts
+ and creating beautiful output including graphs
+* `hurl <http://hurl.it/>`_, a web service for making HTTP requests
+* `wxHTMLPygmentizer <http://colinbarnette.net/projects/wxHTMLPygmentizer>`_ is
+ a GUI utility, used to make code-colorization easier
+* `Postmarkup <https://code.google.com/archive/p/postmarkup/>`_, a BBCode to XHTML generator
+* `WpPygments <http://blog.mirotin.net/?page_id=49>`_, and `WPygments
+ <https://github.com/capynet/WPygments>`_, highlighter plugins for WordPress
+* `Siafoo <http://siafoo.net>`_, a tool for sharing and storing useful code and programming experience
+* `D source <http://www.dsource.org/>`_, a community for the D programming language
+* `dpaste.com <http://dpaste.com/>`_, another Django pastebin
+* `Django snippets <https://djangosnippets.org/>`_, a pastebin for Django code
+* `Fayaa <http://www.fayaa.com/code/>`_, a Chinese pastebin
+* `Incollo.com <http://incollo.com>`_, a free collaborative debugging tool
+* `PasteBox <https://p.boxnet.eu/>`_, a pastebin focused on privacy
+* `hilite.me <http://www.hilite.me/>`_, a site to highlight code snippets
+* `patx.me <http://patx.me/paste>`_, a pastebin
+* `Fluidic <https://github.com/richsmith/fluidic>`_, an experiment in
+ integrating shells with a GUI
+* `pygments.rb <https://github.com/pygments/pygments.rb>`_, a pygments wrapper for Ruby
+* `Clygments <https://github.com/bfontaine/clygments>`_, a pygments wrapper for
+ Clojure
+* `PHPygments <https://github.com/capynet/PHPygments>`_, a pygments wrapper for PHP
+* `Spyder <https://www.spyder-ide.org/>`_, the Scientific Python Development
+ Environment, uses pygments for the multi-language syntax highlighting in its
+ `editor <https://docs.spyder-ide.org/editor.html>`_.
+* `snippet.host <https://snippet.host>`_, minimal text and code snippet hosting
+* `sourcehut <https://sourcehut.org>`_, the hacker's forge
+
+If you have a project or web site using Pygments, `open an issue or PR
+<https://github.com/pygments/pygments>`_ and we'll add a line here.
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..dbd1596
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,47 @@
+Welcome!
+========
+
+This is the home of Pygments. It is a generic syntax highlighter suitable for
+use in code hosting, forums, wikis or other applications that need to prettify
+source code. Highlights are:
+
+* a wide range of |language_count| languages and other text formats is supported
+* special attention is paid to details that increase highlighting quality
+* support for new languages and formats are added easily; most languages use a
+ simple regex-based lexing mechanism
+* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI
+ sequences
+* it is usable as a command-line tool and as a library
+
+Read more in the :doc:`FAQ list <faq>` or the :doc:`documentation <docs/index>`,
+or `download the latest release <https://pypi.python.org/pypi/Pygments>`_.
+
+.. _contribute:
+
+Contribute
+----------
+
+Like every open-source project, we are always looking for volunteers to help us
+with programming. Python knowledge is required, but don't fear: Python is a very
+clear and easy to learn language.
+
+Development takes place on `GitHub <https://github.com/pygments/pygments>`_.
+
+If you found a bug, just open a ticket in the GitHub tracker. Be sure to log
+in to be notified when the issue is fixed -- development is not fast-paced as
+the library is quite stable. You can also send an e-mail to the developers, see
+below.
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*, **Matthäus Chajdas** and **Jean Abou-Samra**.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo <https://dev.pocoo.org/>`_ team and **Tim Hatch**.
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ docs/index
diff --git a/doc/languages.rst b/doc/languages.rst
new file mode 100644
index 0000000..8136442
--- /dev/null
+++ b/doc/languages.rst
@@ -0,0 +1,18 @@
+:orphan:
+
+Languages
+=========
+
+.. pygmentsdoc:: lexers_overview
+
+... that's all?
+---------------
+
+Well, why not write your own? Contributing to Pygments is easy and fun. Take a
+look at the :doc:`docs on lexer development <docs/lexerdevelopment>`. Pull
+requests are welcome on `GitHub <https://github.com/pygments/pygments>`_.
+
+.. note::
+
+ The languages listed here are supported in the development version. The
+ latest release may lack a few of them.
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..8803c98
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.ghc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/doc/pygmentize.1 b/doc/pygmentize.1
new file mode 100644
index 0000000..5ac8fe6
--- /dev/null
+++ b/doc/pygmentize.1
@@ -0,0 +1,112 @@
+.TH PYGMENTIZE 1 "January 20, 2021"
+
+.SH NAME
+pygmentize \- highlights the input file
+
+.SH SYNOPSIS
+.B \fBpygmentize\fP
+.RI [-l\ \fI<lexer>\fP\ |\ -g]\ [-F\ \fI<filter>\fP[:\fI<options>\fP]]\ [-f\ \fI<formatter>\fP]
+.RI [-O\ \fI<options>\fP]\ [-P\ \fI<option=value>\fP]\ [-o\ \fI<outfile>\fP]\ [\fI<infile>\fP]
+.br
+.B \fBpygmentize\fP
+.RI -S\ \fI<style>\fP\ -f\ \fI<formatter>\fP\ [-a\ \fI<arg>\fP]\ [-O\ \fI<options>\fP]\ [-P\ \fI<option=value>\fP]
+.br
+.B \fBpygmentize\fP
+.RI -L\ [\fI<which>\fP\ ...]
+.br
+.B \fBpygmentize\fP
+.RI -N\ \fI<filename>\fP
+.br
+.B \fBpygmentize\fP
+.RI -C
+.br
+.B \fBpygmentize\fP
+.RI -H\ \fI<type>\fP\ \fI<name>\fP
+.br
+.B \fBpygmentize\fP
+.RI -h\ |\ -V
+
+.SH DESCRIPTION
+Pygments is a generic syntax highlighter for general use in all kinds
+of software such as forum systems, wikis or other applications that need to
+prettify source code.
+.PP
+Its highlights are:
+ * a wide range of common languages and markup formats is supported
+ * special attention is paid to details, increasing quality by a fair amount
+ * support for new languages and formats are added easily
+ * a number of output formats, presently HTML, LaTeX and ANSI sequences
+ * it is usable as a command-line tool and as a library
+ * ... and it highlights even Brainfuck!
+.PP
+\fBpygmentize\fP is a command that uses Pygments to highlight the input file and
+write the result to \fI<outfile>\fP. If no \fI<infile>\fP is given, stdin is used.
+.SH OPTIONS
+A summary of options is included below.
+.TP
+.B \-l \fI<lexer>\fP
+Set the lexer name. If not given, the lexer is guessed from the extension of the
+input file name (this obviously doesn't work if the input is stdin).
+.TP
+.B \-g
+Attempt to guess the lexer from the file contents, or pass through as plain text
+if this fails (this option works for highlighting standard input).
+.TP
+.B \-F \fI<filter>\fP[:\fI<options>\fP]
+Add a filter to the token stream. You can give options in the same way as for
+-O after a colon (note: there must not be spaces around the colon).
+This option can be given multiple times.
+.TP
+.B \-f \fI<formatter>\fP
+Set the formatter name. If not given, it will be guessed from the extension of
+the output file name. If no output file is given, the terminal formatter will be
+used by default.
+.TP
+.B \-o \fI<outfile>\fP
+Set output file. If not given, stdout is used.
+.TP
+.B \-O \fI<options>\fP
+With this option, you can give the lexer and formatter a comma-separated list of
+options, e.g. "-O bg=light,python=cool". Which options are valid for which
+lexers and formatters can be found in the documentation.
+This option can be given multiple times.
+.TP
+.B \-P \fI<option=value>\fP
+This option adds lexer and formatter options like the -O option, but
+you can only give one option per -P. That way, the option value may contain
+commas and equals signs, which it can't with -O.
+.TP
+.B \-S \fI<style>\fP
+Print out style definitions for style \fI<style>\fP and for formatter \fI<formatter>\fP.
+The meaning of the argument given by
+.B \-a \fI<arg>\fP
+is formatter dependent and can be found in the documentation.
+.TP
+.B \-L [\fI<which>\fP ...]
+List lexers, formatters, styles or filters. Set \fI<which>\fP to the thing you want
+to list (e.g. "styles"), or omit it to list everything.
+.TP
+.B \-N \fI<filename>\fP
+Guess and print out a lexer name based solely on the given filename. Does not
+take input or highlight anything. If no specific lexer can be found, "text"
+is printed.
+.TP
+.B \-C
+Like \fI-N\fP, but guess a lexer based on content read from standard input.
+.TP
+.B \-H \fI<type>\fP \fI<name>\fP
+Print detailed help for the object \fI<name>\fP of type \fI<type>\fP, where \fI<type>\fP is one
+of "lexer", "formatter" or "filter".
+.TP
+.B \-h
+Show help screen.
+.TP
+.B \-V
+Show version of the Pygments package.
+.SH SEE ALSO
+/usr/share/doc/python-pygments/index.html
+.SH AUTHOR
+pygmentize was written by Georg Brandl <g.brandl@gmx.net>.
+.PP
+This manual page was written by Piotr Ozarowski <ozarow@gmail.com>,
+for the Debian project (but may be used by others).
diff --git a/doc/pyodide/Dockerfile b/doc/pyodide/Dockerfile
new file mode 100644
index 0000000..969651c
--- /dev/null
+++ b/doc/pyodide/Dockerfile
@@ -0,0 +1,20 @@
+# Dockerfile for building Pyodide with a Pygments version from the current checkout.
+# For an example of how to use this image, see the `pyodide` target in the documentation's Makefile.
+FROM ghcr.io/pyodide/pyodide:0.20.0 AS build-stage
+
+WORKDIR pyodide
+
+# Copy new meta with path to local Pygments instead of pypi url.
+COPY doc/pyodide/meta.yaml packages/Pygments/
+
+COPY . /pygments
+
+# Add Pygments to the Pyodide build.
+ENV PYODIDE_PACKAGES=Pygments
+
+# Build Pyodide.
+RUN make
+
+FROM scratch AS export-stage
+
+COPY --from=build-stage /src/pyodide/build /
diff --git a/doc/pyodide/meta.yaml b/doc/pyodide/meta.yaml
new file mode 100644
index 0000000..d58e1d5
--- /dev/null
+++ b/doc/pyodide/meta.yaml
@@ -0,0 +1,8 @@
+package:
+ name: Pygments
+ version: '2.99'
+source:
+ path: /pygments
+test:
+ imports:
+ - pygments
diff --git a/doc/styles.rst b/doc/styles.rst
new file mode 100644
index 0000000..a1bb019
--- /dev/null
+++ b/doc/styles.rst
@@ -0,0 +1,5 @@
+:orphan:
+
+This file is overridden by _templates/styles.html and just exists to allow the
+Styles gallery to be reliably linked from the documentation
+(since its location varies between `make html` and `make dirhtml`).
diff --git a/external/autopygmentize b/external/autopygmentize
new file mode 100755
index 0000000..85d2366
--- /dev/null
+++ b/external/autopygmentize
@@ -0,0 +1,145 @@
+#!/bin/bash
+# Best effort auto-pygmentization with transparent decompression
+# by Reuben Thomas 2008-2022
+# This program is in the public domain.
+
+# Strategy: first see if pygmentize can find a lexer; if not, ask file; if that finds nothing, fail
+# Set the environment variable PYGMENTIZE_OPTS or pass options before the file path to configure pygments.
+
+# This program can be used as a .lessfilter for the less pager to auto-color less's output
+
+file="${!#}" # last argument
+options=${@:1:$(($#-1))} # handle others args as options to pass to pygmentize
+
+file_common_opts="--brief --dereference"
+
+case $(file --mime-type --uncompress $file_common_opts "$file") in
+ application/xml|image/svg+xml) lexer=xml;;
+ application/javascript) lexer=javascript;;
+ application/json) lexer=json;;
+ text/html) lexer=html;;
+ text/troff) lexer=nroff;;
+ text/x-asm) lexer=nasm;;
+ text/x-awk) lexer=awk;;
+ text/x-c) lexer=c;;
+ text/x-c++) lexer=cpp;;
+ text/x-clojure) lexer=clojure;;
+ text/x-crystal) lexer=crystal;;
+ text/x-diff) lexer=diff;;
+ text/x-execline) lexer=execline;;
+ text/x-forth) lexer=forth;;
+ text/x-fortran) lexer=fortran;;
+ text/x-gawk) lexer=gawk;;
+ text/x-java) lexer=java;;
+ text/x-lisp) lexer=common-lisp;;
+ text/x-lua|text/x-luatex) lexer=lua;;
+ text/x-makefile) lexer=make;;
+ text/x-msdos-batch) lexer=bat;;
+ text/x-nawk) lexer=nawk;;
+ text/x-objective-c) lexer=objective-c;;
+ text/x-pascal) lexer=pascal;;
+ text/x-perl) lexer=perl;;
+ text/x-php) lexer=php;;
+ text/x-po) lexer=po;;
+ text/x-python) lexer=python;;
+ text/x-ruby) lexer=ruby;;
+ text/x-script.python) lexer=python;;
+ text/x-shellscript) lexer=sh;;
+ text/x-tcl) lexer=tcl;;
+ text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer
+ text/xml) lexer=xml;;
+ text/vnd.graphviz) lexer=graphviz;;
+
+ # Types that file outputs which pygmentize didn't support as of file 5.41, pygments 2.11.2
+ # text/binary
+ # text/calendar
+ # text/PGP
+ # text/prs.lines.tag
+ # text/rtf
+ # text/spreadsheet
+ # text/texmacs
+ # text/vcard
+ # text/vnd.sosi
+ # text/x-Algol68
+ # text/x-bcpl
+ # text/x-dmtf-mif
+ # text/x-gimp-curve
+ # text/x-gimp-ggr
+ # text/x-gimp-gpl
+ # text/x-info
+ # text/x-installshield-lid
+ # text/x-m4
+ # text/x-modulefile
+ # text/x-ms-adm
+ # text/x-ms-cpx
+ # text/x-ms-regedirt
+ # text/x-ms-tag
+ # text/x-systemtap
+ # text/x-vcard
+ # text/x-wine-extension-reg
+ # text/x-xmcd
+
+ text/plain) # special filenames. TODO: insert more
+ case $(basename "$file") in
+ .zshrc) lexer=sh;;
+ esac
+ # pygmentize -N is much cheaper than file, but makes some bad guesses (e.g.
+ # it guesses ".pl" is Prolog, not Perl)
+ lexer=$(pygmentize -N "$file")
+ ;;
+esac
+
+# Find a concatenator for compressed files
+concat=
+concat_opts=
+case $(file $file_common_opts --mime-type "$file") in
+ # TODO: add support
+ # application/x-rzip (does not decompress to stdout)
+ # application/x-dzip (Windows only)
+ application/gzip|application/x-gzip) concat=zcat;;
+ application/x-bzip) concat=bzip; concat_opts=-dc;;
+ application/x-bzip2) concat=bzcat;;
+ application/x-lz4) concat=lz4; concat_opts=-dc;;
+ application/x-lzh-compressed) concat=p7zip; concat_opts=-dc;;
+ application/x-lzma) concat=lzcat;;
+ application/x-lzip) concat=lzip; concat_opts=-dc;;
+ application/x-xz) concat=xzcat;;
+ application/x-zoo) concat=zoo; concat_opts=fu;;
+esac
+# If concat is unset or doesn't exist, use cat instead
+if [[ "$concat" == "" ]] || ! command -v "$concat"; then
+ concat=cat
+ concat_opts=
+fi
+
+# Find a suitable reader, preceded by a hex dump for binary files,
+# or fmt for text with very long lines
+prereader=""
+reader=cat
+encoding=$(file --mime-encoding --uncompress $file_common_opts "$file")
+# FIXME: need a way to switch between hex and text view, as file often
+# misdiagnoses files when they contain a few control characters
+# if [[ $encoding == "binary" ]]; then
+# prereader="od -x" # POSIX fallback
+# if [[ -n $(which hd) ]]; then
+# prereader=hd # preferred
+# fi
+# lexer=hexdump
+# encoding=latin1
+#el
+# FIXME: Using fmt does not work well for system logs
+# if [[ "$lexer" == "text" ]]; then
+# if file "$file" | grep -ql "text, with very long lines"; then
+# reader=fmt
+# fi
+# fi
+if [[ "$lexer" != "text" ]]; then
+ reader="pygmentize -O inencoding=$encoding $PYGMENTIZE_OPTS $options -l $lexer"
+fi
+
+# Run the reader
+if [[ -n "$prereader" ]]; then
+ exec $concat "$file" | $prereader | $reader
+else
+ exec $concat "$file" | $reader
+fi
diff --git a/external/lasso-builtins-generator-9.lasso b/external/lasso-builtins-generator-9.lasso
new file mode 100755
index 0000000..0156299
--- /dev/null
+++ b/external/lasso-builtins-generator-9.lasso
@@ -0,0 +1,162 @@
+#!/usr/bin/lasso9
+
+/*
+ Builtins Generator for Lasso 9
+
+ This is the shell script that was used to extract Lasso 9's built-in keywords
+ and generate most of the _lasso_builtins.py file. When run, it creates a file
+ containing the types, traits, methods, and members of the currently-installed
+ version of Lasso 9.
+
+ A list of tags in Lasso 8 can be generated with this code:
+
+ <?LassoScript
+ local('l8tags' = list,
+ 'l8libs' = array('Cache','ChartFX','Client','Database','File','HTTP',
+ 'iCal','Lasso','Link','List','PDF','Response','Stock','String',
+ 'Thread','Valid','WAP','XML'));
+ iterate(#l8libs, local('library'));
+ local('result' = namespace_load(#library));
+ /iterate;
+ iterate(tags_list, local('i'));
+ #l8tags->insert(string_removeleading(#i, -pattern='_global_'));
+ /iterate;
+ #l8tags->sort;
+ iterate(#l8tags, local('i'));
+ string_lowercase(#i)+"<br>";
+ /iterate;
+
+*/
+
+output("This output statement is required for a complete list of methods.")
+local(f) = file("_lasso_builtins-9.py")
+#f->doWithClose => {
+
+#f->openTruncate
+#f->writeString('# -*- coding: utf-8 -*-
+"""
+ pygments.lexers._lasso_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Built-in Lasso types, traits, methods, and members.
+
+ :copyright: Copyright 2006-'+date->year+' by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+')
+
+// Load and register contents of $LASSO9_MASTER_HOME/LassoModules/
+database_initialize
+
+// Load all of the libraries from builtins and lassoserver
+// This forces all possible available types and methods to be registered
+local(srcs =
+ (:
+ dir(sys_masterHomePath + '/LassoLibraries/builtins/')->eachFilePath,
+ dir(sys_masterHomePath + '/LassoLibraries/lassoserver/')->eachFilePath
+ )
+)
+
+with topLevelDir in delve(#srcs)
+where not #topLevelDir->lastComponent->beginsWith('.')
+do protect => {
+ handle_error => {
+ stdoutnl('Unable to load: ' + #topLevelDir + ' ' + error_msg)
+ }
+ library_thread_loader->loadLibrary(#topLevelDir)
+ stdoutnl('Loaded: ' + #topLevelDir)
+}
+
+email_initialize
+log_initialize
+session_initialize
+
+local(
+ typesList = set(),
+ traitsList = set(),
+ unboundMethodsList = set(),
+ memberMethodsList = set()
+)
+
+// types
+with type in sys_listTypes
+where not #type->asString->endsWith('$') // skip threads
+do {
+ #typesList->insert(#type)
+}
+
+// traits
+with trait in sys_listTraits
+where not #trait->asString->beginsWith('$') // skip combined traits
+do {
+ #traitsList->insert(#trait)
+}
+
+// member methods
+with type in #typesList
+do {
+ with method in #type->getType->listMethods
+ where #method->typeName == #type // skip inherited methods
+ let name = #method->methodName
+ where not #name->asString->endsWith('=') // skip setter methods
+ where #name->asString->isAlpha(1) // skip unpublished methods
+ do {
+ #memberMethodsList->insert(#name)
+ }
+}
+with trait in #traitsList
+do {
+ with method in #trait->getType->provides
+ where #method->typeName == #trait // skip inherited methods
+ let name = #method->methodName
+ where not #name->asString->endsWith('=') // skip setter methods
+ where #name->asString->isAlpha(1) // skip unpublished methods
+ do {
+ #memberMethodsList->insert(#name)
+ }
+}
+
+// unbound methods
+with method in sys_listUnboundMethods
+let name = #method->methodName
+where not #name->asString->endsWith('=') // skip setter methods
+where #name->asString->isAlpha(1) // skip unpublished methods
+where #typesList !>> #name
+where #traitsList !>> #name
+do {
+ #unboundMethodsList->insert(#name)
+}
+
+// write to file
+with i in (:
+ pair(#typesList, "BUILTINS = {
+ 'Types': (
+"),
+ pair(#traitsList, " ),
+ 'Traits': (
+"),
+ pair(#unboundMethodsList, " ),
+ 'Unbound Methods': (
+"),
+ pair(#memberMethodsList, " )
+}
+MEMBERS = {
+ 'Member Methods': (
+")
+)
+do {
+ #f->writeString(#i->second)
+ with t in (#i->first)
+ let ts = #t->asString
+ order by #ts
+ do {
+ #f->writeString(" '"+#ts->lowercase&asString+"',\n")
+ }
+}
+
+#f->writeString(" )
+}
+")
+
+}
diff --git a/external/lilypond-builtins-generator.ly b/external/lilypond-builtins-generator.ly
new file mode 100644
index 0000000..983b4c3
--- /dev/null
+++ b/external/lilypond-builtins-generator.ly
@@ -0,0 +1,391 @@
+%% Autogenerate a list of LilyPond keywords
+
+\version "2.23.6"
+
+#(use-modules (ice-9 receive)
+ (ice-9 regex))
+
+#(define port (open-output-file "../pygments/lexers/_lilypond_builtins.py"))
+
+#(define output-preamble
+ "\"\"\"
+ pygments.lexers._lilypond_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ LilyPond builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+\"\"\"
+
+# Contents generated by the script lilypond-builtins-generator.ly
+# found in the external/ directory of the source tree.
+
+")
+
+#(format port "~a" output-preamble)
+
+#(define (dump-py-list name vals)
+ (let* ((string-vals
+ (map symbol->string vals))
+ (fixed-vals
+ (filter-map
+ (lambda (str)
+ ; To avoid conflicts with Scheme builtins,
+ ; a leading backslash is prepended to \<,
+ ; \= and a few others. The lexer finds it
+ ; itself, so remove it here.
+ (cond
+ ((equal? str "\\\\")
+ #f)
+ ((string-startswith str "\\")
+ (string-drop str 1))
+ (else
+ str)))
+ string-vals))
+ (sorted-vals ; reproducibility
+ ; Avoid duplicates (e.g., identical pitches
+ ; in different languages)
+ (uniq-list
+ (sort fixed-vals string<?)))
+ (formatted-vals
+ (map
+ (lambda (val)
+ (format #f " \"~a\"," val name))
+ sorted-vals))
+ (joint-vals
+ (string-join formatted-vals "\n")))
+ (format port
+ "~a = [
+~a
+]
+
+"
+ name
+ joint-vals)))
+
+
+%% KEYWORDS
+
+#(define keywords
+ '(
+ ; Lexical modes.
+ notemode
+ lyricmode
+ lyricsto
+ addlyrics
+ chordmode
+ chords
+ figuremode
+ figures
+ drummode
+ drums
+ ; Output definitions.
+ header
+ layout
+ midi
+ paper
+ ; Context definitions.
+ ;; \context is also used in music. We take it as
+ ;; a keyword in both cases.
+ context
+ with
+ name
+ type
+ accepts
+ denies
+ alias
+ defaultchild
+ consists
+ remove
+ description
+ ;; Not strictly a keyword, but can be viewed so.
+ inherit-acceptability
+ ; Blocks.
+ book
+ bookpart
+ score
+ ; Other.
+ new
+ etc
+ include
+ language
+ version))
+
+#(dump-py-list 'keywords keywords)
+
+%% CLEFS
+
+#(define all-clefs
+ (map string->symbol (map car supported-clefs)))
+
+#(dump-py-list 'clefs all-clefs)
+
+%% SCALES
+
+#(define all-scales
+ '(major
+ minor
+ ionian
+ locrian
+ aeolian
+ mixolydian
+ lydian
+ phrygian
+ dorian))
+
+#(dump-py-list 'scales all-scales)
+
+%% REPEAT TYPES
+
+#(define all-repeat-types
+ '(volta percent unfold segno))
+
+#(dump-py-list 'repeat_types all-repeat-types)
+
+%% UNITS
+
+#(define all-units
+ '(mm cm in pt staff-space))
+
+#(dump-py-list 'units all-units)
+
+%% CHORD MODIFIERS
+
+#(define all-chord-modifiers
+ '(m dim aug maj))
+
+#(dump-py-list 'chord_modifiers all-chord-modifiers)
+
+%% PITCHES
+
+#(define all-pitch-language-names
+ (map car language-pitch-names))
+
+#(dump-py-list 'pitch_language_names all-pitch-language-names)
+
+#(define all-pitch-names
+ (append
+ ; We highlight rests just like pitches.
+ '(r R)
+ (map car (append-map cdr language-pitch-names))
+ ; Drum note names.
+ (map car drumPitchNames)))
+
+#(dump-py-list 'pitches all-pitch-names)
+
+%% MUSIC FUNCTIONS AND SHORTCUTS
+
+% View these as music functions.
+#(define extra-music-functions
+ '(set
+ unset
+ override
+ revert
+ tweak
+ once
+ undo
+ temporary
+ repeat
+ alternative
+ tempo
+ change))
+
+#(let* ((module (current-module))
+ (module-alist (ly:module->alist module))
+ (all-music-functions
+ (filter
+ (lambda (entry)
+ (ly:music-function? (cdr entry)))
+ module-alist))
+ (all-predefined-music-objects
+ (filter
+ (lambda (entry)
+ (ly:music? (cdr entry)))
+ module-alist)))
+ (receive (articulations non-articulations)
+ (partition
+ (lambda (entry)
+ (ly:event? (cdr entry)))
+ all-predefined-music-objects)
+ (receive (dynamics non-dynamic-articulations)
+ (partition
+ (lambda (entry)
+ (any
+ (lambda (type)
+ (music-is-of-type? (cdr entry)
+ type))
+ '(dynamic-event crescendo-event decrescendo-event)))
+ articulations)
+ (dump-py-list 'music_functions
+ (append extra-music-functions
+ (map car all-music-functions)))
+ (dump-py-list 'dynamics (map car dynamics))
+ (dump-py-list 'articulations (map car non-dynamic-articulations))
+ (dump-py-list 'music_commands (map car non-articulations)))))
+
+%% MARKUP COMMANDS
+
+#(let* ((markup-name-regexp
+ (make-regexp "(.*)-markup(-list)?"))
+ (modules
+ (cons (current-module)
+ (map resolve-module '((lily) (lily accreg)))))
+ (alist
+ (apply append
+ (map ly:module->alist modules)))
+ (markup-commands
+ (filter
+ (lambda (entry)
+ (or (markup-function? (cdr entry))
+ (markup-list-function? (cdr entry))))
+ alist))
+ (markup-command-names
+ (map
+ (lambda (entry)
+ (let* ((string-name (symbol->string (car entry)))
+ (match (regexp-exec markup-name-regexp string-name)))
+ (string->symbol (match:substring match 1))))
+ markup-commands))
+ (markup-words
+ (append '(markup markuplist)
+ markup-command-names)))
+ (dump-py-list 'markup_commands markup-words))
+
+%% GROBS
+
+#(let ((grob-names (map car all-grob-descriptions)))
+ (dump-py-list 'grobs grob-names))
+
+%% CONTEXTS
+
+#(let* ((layout-module
+ (ly:output-def-scope $defaultlayout))
+ (layout-alist
+ (ly:module->alist layout-module))
+ (all-context-defs
+ (filter
+ (lambda (entry)
+ (ly:context-def? (cdr entry)))
+ layout-alist))
+ (context-def-names
+ (map car all-context-defs)))
+ (dump-py-list 'contexts context-def-names))
+
+%% TRANSLATORS
+
+#(let* ((all-translators
+ (ly:get-all-translators))
+ (translator-names
+ (map ly:translator-name all-translators)))
+ (dump-py-list 'translators translator-names))
+
+%% SCHEME FUNCTIONS
+
+#(let* ((module (resolve-module '(lily)))
+ (module-alist (ly:module->alist module))
+ (all-functions
+ (filter
+ (lambda (entry)
+ (or (procedure? (cdr entry))
+ (macro? (cdr entry))))
+ module-alist))
+ (all-function-names
+ (map car all-functions)))
+ (dump-py-list 'scheme_functions all-function-names))
+
+%% PROPERTIES
+
+#(dump-py-list 'context_properties all-translation-properties)
+#(dump-py-list 'grob_properties all-backend-properties)
+
+%% PAPER VARIABLES
+
+% Reference: https://lilypond.org/doc/v2.22/Documentation/notation/page-layout
+#(define all-paper-variables
+ '(paper-height
+ top-margin
+ bottom-margin
+ ragged-bottom
+ ragged-last-bottom
+ markup-system-spacing
+ score-markup-spacing
+ score-system-spacing
+ system-system-spacing
+ markup-markup-spacing
+ last-bottom-spacing
+ top-system-spacing
+ top-markup-spacing
+ paper-width
+ line-width
+ left-margin
+ right-margin
+ check-consistency
+ ragged-right
+ ragged-last
+ two-sided
+ inner-margin
+ outer-margin
+ binding-offset
+ horizontal-shift
+ indent
+ short-indent
+ max-systems-per-page
+ min-systems-per-page
+ systems-per-page
+ system-count
+ page-breaking
+ page-breaking-system-system-spacing
+ page-count
+ blank-page-penalty
+ blank-last-page-penalty
+ auto-first-page-number
+ first-page-number
+ print-first-page-number
+ page-number-type
+ page-spacing-weight
+ print-all-headers
+ system-separator-markup
+ footnote-separator-markup
+ ;; Let's view these four as \paper variables.
+ basic-distance
+ minimum-distance
+ padding
+ stretchability
+ ;; These were forgotten in the documentation.
+ evenHeaderMarkup
+ oddHeaderMarkup
+ evenFooterMarkup
+ oddFooterMarkup
+ bookTitleMarkup
+ scoreTitleMarkup
+ ))
+
+#(dump-py-list 'paper_variables all-paper-variables)
+
+%% HEADER VARIABLES
+
+% Reference: https://lilypond.org/doc/v2.22/Documentation/notation/creating-titles-headers-and-footers.html#default-layout-of-bookpart-and-score-titles
+#(define all-header-variables
+ '(dedication
+ title
+ subtitle
+ subsubtitle
+ instrument
+ poet
+ composer
+ meter
+ arranger
+ tagline
+ copyright
+ piece
+ opus
+ ; The following are used in LSR snippets and regression tests.
+ lsrtags
+ doctitle
+ texidoc))
+
+#(dump-py-list 'header_variables all-header-variables)
+
+
+#(close-port port)
diff --git a/external/markdown-processor.py b/external/markdown-processor.py
new file mode 100644
index 0000000..d72012f
--- /dev/null
+++ b/external/markdown-processor.py
@@ -0,0 +1,66 @@
+"""
+ The Pygments Markdown Preprocessor
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This fragment is a Markdown_ preprocessor that renders source code
+ to HTML via Pygments. To use it, invoke Markdown like so::
+
+ import markdown
+
+ html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
+
+ This uses CSS classes by default, so use
+ ``pygmentize -S <some style> -f html > pygments.css``
+ to create a stylesheet to be added to the website.
+
+ You can then highlight source code in your markdown markup::
+
+ [sourcecode:lexer]
+ some code
+ [/sourcecode]
+
+ .. _Markdown: https://pypi.python.org/pypi/Markdown
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Options
+# ~~~~~~~
+
+# Set to True if you want inline CSS styles instead of classes
+INLINESTYLES = False
+
+
+import re
+
+from markdown.preprocessors import Preprocessor
+from markdown.extensions import Extension
+
+from pygments import highlight
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import get_lexer_by_name, TextLexer
+
+
+class CodeBlockPreprocessor(Preprocessor):
+
+ pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
+
+ formatter = HtmlFormatter(noclasses=INLINESTYLES)
+
+ def run(self, lines):
+ def repl(m):
+ try:
+ lexer = get_lexer_by_name(m.group(1))
+ except ValueError:
+ lexer = TextLexer()
+ code = highlight(m.group(2), lexer, self.formatter)
+ code = code.replace('\n\n', '\n&nbsp;\n').replace('\n', '<br />')
+ return '\n\n<div class="code">%s</div>\n\n' % code
+ joined_lines = "\n".join(lines)
+ joined_lines = self.pattern.sub(repl, joined_lines)
+ return joined_lines.split("\n")
+
+class CodeBlockExtension(Extension):
+ def extendMarkdown(self, md, md_globals):
+ md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
diff --git a/external/moin-parser.py b/external/moin-parser.py
new file mode 100644
index 0000000..562b76f
--- /dev/null
+++ b/external/moin-parser.py
@@ -0,0 +1,111 @@
+"""
+ The Pygments MoinMoin Parser
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This is a MoinMoin parser plugin that renders source code to HTML via
+ Pygments; you need Pygments 0.7 or newer for this parser to work.
+
+ To use it, set the options below to match your setup and put this file in
+ the data/plugin/parser subdirectory of your Moin instance, and give it the
+ name that the parser directive should have. For example, if you name the
+ file ``code.py``, you can get a highlighted Python code sample with this
+ Wiki markup::
+
+ {{{
+ #!code python
+ [...]
+ }}}
+
+ Additionally, if you set ATTACHMENTS below to True, Pygments will also be
+ called for all attachments for whose filenames there is no other parser
+ registered.
+
+ You are responsible for including CSS rules that will map the Pygments CSS
+ classes to colors. You can output a stylesheet file with `pygmentize`, put
+ it into the `htdocs` directory of your Moin instance and then include it in
+ the `stylesheets` configuration option in the Moin config, e.g.::
+
+ stylesheets = [('screen', '/htdocs/pygments.css')]
+
+ If you do not want to do that and are willing to accept larger HTML
+ output, you can set the INLINESTYLES option below to True.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Options
+# ~~~~~~~
+
+# Set to True if you want to highlight attachments, in addition to
+# {{{ }}} blocks.
+ATTACHMENTS = True
+
+# Set to True if you want inline CSS styles instead of classes
+INLINESTYLES = False
+
+
+import sys
+
+from pygments import highlight
+from pygments.lexers import get_lexer_by_name, get_lexer_for_filename, TextLexer
+from pygments.formatters import HtmlFormatter
+from pygments.util import ClassNotFound
+
+
+# wrap lines in <span>s so that the Moin-generated line numbers work
+class MoinHtmlFormatter(HtmlFormatter):
+ def wrap(self, source, outfile):
+ for line in source:
+ yield 1, '<span class="line">' + line[1] + '</span>'
+
+htmlformatter = MoinHtmlFormatter(noclasses=INLINESTYLES)
+textlexer = TextLexer()
+codeid = [0]
+
+
+class Parser:
+ """
+ MoinMoin Pygments parser.
+ """
+ if ATTACHMENTS:
+ extensions = '*'
+ else:
+ extensions = []
+
+ Dependencies = []
+
+ def __init__(self, raw, request, **kw):
+ self.raw = raw
+ self.req = request
+ if "format_args" in kw:
+ # called from a {{{ }}} block
+ try:
+ self.lexer = get_lexer_by_name(kw['format_args'].strip())
+ except ClassNotFound:
+ self.lexer = textlexer
+ return
+ if "filename" in kw:
+ # called for an attachment
+ filename = kw['filename']
+ else:
+ # called for an attachment by an older moin
+ # HACK: find out the filename by peeking into the execution
+ # frame which might not always work
+ try:
+ frame = sys._getframe(1)
+ filename = frame.f_locals['filename']
+ except:
+ filename = 'x.txt'
+ try:
+ self.lexer = get_lexer_for_filename(filename)
+ except ClassNotFound:
+ self.lexer = textlexer
+
+ def format(self, formatter):
+ codeid[0] += 1
+ id = "pygments_%s" % codeid[0]
+ w = self.req.write
+ w(formatter.code_area(1, id, start=1, step=1))
+ w(formatter.rawHTML(highlight(self.raw, self.lexer, htmlformatter)))
+ w(formatter.code_area(0, id))
diff --git a/external/pygments.bashcomp b/external/pygments.bashcomp
new file mode 100644
index 0000000..1299fdb
--- /dev/null
+++ b/external/pygments.bashcomp
@@ -0,0 +1,38 @@
+#!bash
+#
+# Bash completion support for Pygments (the 'pygmentize' command).
+#
+
+_pygmentize()
+{
+ local cur prev
+
+ COMPREPLY=()
+ cur=`_get_cword`
+ prev=${COMP_WORDS[COMP_CWORD-1]}
+
+ case "$prev" in
+ -f)
+ FORMATTERS=`pygmentize -L formatters | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'`
+ COMPREPLY=( $( compgen -W '$FORMATTERS' -- "$cur" ) )
+ return 0
+ ;;
+ -l)
+ LEXERS=`pygmentize -L lexers | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'`
+ COMPREPLY=( $( compgen -W '$LEXERS' -- "$cur" ) )
+ return 0
+ ;;
+ -S)
+ STYLES=`pygmentize -L styles | grep '* ' | cut -c3- | sed s/:$//`
+ COMPREPLY=( $( compgen -W '$STYLES' -- "$cur" ) )
+ return 0
+ ;;
+ esac
+
+ if [[ "$cur" == -* ]]; then
+ COMPREPLY=( $( compgen -W '-f -l -S -L -g -O -P -F \
+ -N -H -h -V -o' -- "$cur" ) )
+ return 0
+ fi
+}
+complete -F _pygmentize -o default pygmentize
diff --git a/external/rst-directive.py b/external/rst-directive.py
new file mode 100644
index 0000000..5872185
--- /dev/null
+++ b/external/rst-directive.py
@@ -0,0 +1,81 @@
+"""
+ The Pygments reStructuredText directive
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This fragment is a Docutils_ 0.5 directive that renders source code
+ (to HTML only, currently) via Pygments.
+
+ To use it, adjust the options below and copy the code into a module
+ that you import on initialization. The code then automatically
+ registers a ``sourcecode`` directive that you can use instead of
+ normal code blocks like this::
+
+ .. sourcecode:: python
+
+ My code goes here.
+
+ If you want to have different code styles, e.g. one with line numbers
+ and one without, add formatters with their names in the VARIANTS dict
+ below. You can invoke them instead of the DEFAULT one by using a
+ directive option::
+
+ .. sourcecode:: python
+ :linenos:
+
+ My code goes here.
+
+ Look at the `directive documentation`_ to get all the gory details.
+
+ .. _Docutils: https://docutils.sourceforge.io/
+ .. _directive documentation:
+ https://docutils.sourceforge.io/docs/howto/rst-directives.html
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Options
+# ~~~~~~~
+
+# Set to True if you want inline CSS styles instead of classes
+INLINESTYLES = False
+
+from pygments.formatters import HtmlFormatter
+
+# The default formatter
+DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
+
+# Add name -> formatter pairs for every variant you want to use
+VARIANTS = {
+ # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
+}
+
+
+from docutils import nodes
+from docutils.parsers.rst import directives, Directive
+
+from pygments import highlight
+from pygments.lexers import get_lexer_by_name, TextLexer
+
+class Pygments(Directive):
+ """ Source code syntax highlighting.
+ """
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = True
+ option_spec = {key: directives.flag for key in VARIANTS}
+ has_content = True
+
+ def run(self):
+ self.assert_has_content()
+ try:
+ lexer = get_lexer_by_name(self.arguments[0])
+ except ValueError:
+ # no lexer found - use the text one instead of an exception
+ lexer = TextLexer()
+ # take an arbitrary option if more than one is given
+ formatter = self.options and VARIANTS[list(self.options)[0]] or DEFAULT
+ parsed = highlight('\n'.join(self.content), lexer, formatter)
+ return [nodes.raw('', parsed, format='html')]
+
+directives.register_directive('sourcecode', Pygments)
diff --git a/external/scheme-builtins-generator.scm b/external/scheme-builtins-generator.scm
new file mode 100644
index 0000000..5c260b8
--- /dev/null
+++ b/external/scheme-builtins-generator.scm
@@ -0,0 +1,116 @@
+;; Autogenerate a list of Scheme keywords (i.e., macros) and built-in
+;; functions. This is written for the Guile implementation. The
+;; principle of autogenerating this has the advantage of catching many
+;; builtins that would be tedious to maintain by hand, and the
+;; disadvantage that some builtins very specific to Guile and not
+;; relevant to other implementations are caught as well. However,
+;; since Scheme builtin function names tend to be rather specific,
+;; this should not be a significant problem.
+
;; Open the generated Python module for writing; the path is relative to
;; the external/ directory, from which this script is meant to be run.
(define port (open-output-file "../pygments/lexers/_scheme_builtins.py"))

;; Emit the module docstring of the generated Python file.
(display
 "\"\"\"
    pygments.lexers._scheme_builtins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Scheme builtins.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
\"\"\"
"
 port)

;; Record provenance (this script and the Guile version that ran it)
;; so readers of the generated file know how to regenerate it.
(format port
"\n# Autogenerated by external/scheme-builtins-generator.scm\n\
# using Guile ~a.\n\n"
        (version))
+
+(use-modules (srfi srfi-1)
+ (ice-9 match))
+
;; The module paths whose public bindings are harvested below.
(define relevant-modules
  ;; This is a nightmare. Scheme builtins are split in
  ;; gazillions of standards, SRFIs and implementation
  ;; extensions. With so many sources, it's hard to define
  ;; what is really a Scheme builtin. This is a rather
  ;; conservative list of Guile modules that might be used
  ;; the most frequently (somewhat subjective, admittedly).
  '(
    ;; The real builtins.
    (guile)
    ;; Let's include the fundamental list library.
    (srfi srfi-1)
    ;; define-record-type
    (srfi srfi-9)
    ;; let-values, let*-values
    (srfi srfi-11)
    ;; case-lambda
    (srfi srfi-16)
    ;; Pattern matching
    (ice-9 match)
    ;; Included for compatibility with files written for R5RS
    (rnrs r5rs)))
+
;; Return (symbol . variable) pairs for MODULE's own bindings plus,
;; recursively, the bindings of every interface MODULE uses.
(define (get-all-bindings module)
  ;; Need to recurse to find all public bindings. module-map
  ;; only considers the module's own bindings.
  (let* ((own (module-map cons module))
         (uses (module-uses module)))
    (append own (append-map get-all-bindings uses))))
+
;; All (symbol . variable) pairs reachable from the relevant modules.
;; May contain duplicates; deduplication happens later in dump-py-list.
(define all-bindings
  (append-map
   ;; Need to use module-public-interface to restrict to
   ;; public bindings. Note that module-uses already
   ;; returns public interfaces.
   (lambda (mod-path)
     (let* ((mod-object (resolve-module mod-path))
            (iface (module-public-interface mod-object)))
       (get-all-bindings iface)))
   relevant-modules))
+
;; Return the names (symbols) of all bindings in all-bindings whose bound
;; value satisfies PRED; unbound variables are silently skipped.
(define (filter-for pred)
  (filter-map
   (match-lambda
     ((key . variable)
      (and (variable-bound? variable)
           (let ((value (variable-ref variable)))
             (and (pred value)
                  key)))))
   all-bindings))
+
;; Sort LST with ordering predicate PRED and drop adjacent duplicates
;; (compared with equal?), yielding a sorted list of unique elements.
(define (sort-and-uniq lst pred)
  (let loop ((lst (sort lst pred))
             (acc '()))
    (match lst
      ;; End of input: the accumulator was built in reverse order.
      (() (reverse! acc))
      ((one . rest)
       ;; Skip the run of elements equal to the one just kept.
       (loop (drop-while (lambda (elt)
                           (equal? elt one))
                         rest)
             (cons one acc))))))
+
;; Render LST (a list of symbols) as the body of a Python set literal:
;; one indented, double-quoted, comma-terminated entry per line,
;; sorted lexicographically with duplicates removed.
(define (dump-py-list lst)
  (string-join
   (map
    (lambda (name)
      (format #f "    \"~a\"," name))
    (sort-and-uniq
     (map symbol->string lst)
     string<?))
   "\n"))
+
;; Write a Python assignment ``NAME = { ... }`` to the output port,
;; containing EXTRA plus every binding whose value satisfies PRED.
(define (dump-builtins name pred extra)
  (format port
          "~a = {\n~a\n}\n\n"
          name
          (dump-py-list (append extra (filter-for pred)))))
+
(define extra-procedures
  ;; These are found in RnRS but not implemented by Guile.
  '(load transcript-off transcript-on))

;; Keywords are macros; builtins are procedures (plus the RnRS extras).
(dump-builtins 'scheme_keywords macro? '())
(dump-builtins 'scheme_builtins procedure? extra-procedures)
diff --git a/pygments/__init__.py b/pygments/__init__.py
new file mode 100644
index 0000000..9cb60d1
--- /dev/null
+++ b/pygments/__init__.py
@@ -0,0 +1,82 @@
+"""
+ Pygments
+ ~~~~~~~~
+
+ Pygments is a syntax highlighting package written in Python.
+
+ It is a generic syntax highlighter for general use in all kinds of software
+ such as forum systems, wikis or other applications that need to prettify
+ source code. Highlights are:
+
+ * a wide range of common languages and markup formats is supported
+ * special attention is paid to details, increasing quality by a fair amount
+ * support for new languages and formats is added easily
+ * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
+ formats that PIL supports, and ANSI sequences
+ * it is usable as a command-line tool and as a library
+ * ... and it highlights even Brainfuck!
+
+ The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
+
+ .. _Pygments master branch:
+ https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from io import StringIO, BytesIO
+
+__version__ = '2.14.0'
+__docformat__ = 'restructuredtext'
+
+__all__ = ['lex', 'format', 'highlight']
+
+
def lex(code, lexer):
    """Run *lexer* over *code* and return the resulting iterable of tokens."""
    try:
        return lexer.get_tokens(code)
    except TypeError:
        # A TypeError here frequently means the caller passed the lexer
        # *class* instead of an instance; detect that and raise a clearer
        # error, otherwise re-raise the original exception.
        from pygments.lexer import RegexLexer
        passed_a_class = isinstance(lexer, type) and issubclass(lexer, RegexLexer)
        if passed_a_class:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
+
+
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result is written to it and ``None`` is
    returned; otherwise the output is returned as a string (or as
    ``bytes`` if the formatter has an ``encoding`` set).
    """
    try:
        if not outfile:
            # A formatter with an ``encoding`` emits bytes, so choose the
            # matching in-memory buffer.  (Explicit conditional instead of
            # the fragile legacy ``and``/``or`` selection idiom.)
            if getattr(formatter, 'encoding', None):
                realoutfile = BytesIO()
            else:
                realoutfile = StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError:
        # Heuristic to catch a common mistake: passing the formatter class
        # instead of an instance.
        from pygments.formatter import Formatter
        if isinstance(formatter, type) and issubclass(formatter, Formatter):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
+
+
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
diff --git a/pygments/__main__.py b/pygments/__main__.py
new file mode 100644
index 0000000..423b46e
--- /dev/null
+++ b/pygments/__main__.py
@@ -0,0 +1,17 @@
+"""
+ pygments.__main__
+ ~~~~~~~~~~~~~~~~~
+
+ Main entry point for ``python -m pygments``.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import pygments.cmdline
+
# Run the CLI and exit with its status; translate Ctrl-C into a
# conventional non-zero exit code instead of printing a traceback.
try:
    sys.exit(pygments.cmdline.main(sys.argv))
except KeyboardInterrupt:
    sys.exit(1)
diff --git a/pygments/cmdline.py b/pygments/cmdline.py
new file mode 100644
index 0000000..1fdf335
--- /dev/null
+++ b/pygments/cmdline.py
@@ -0,0 +1,668 @@
+"""
+ pygments.cmdline
+ ~~~~~~~~~~~~~~~~
+
+ Command line interface.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import sys
+import shutil
+import argparse
+from textwrap import dedent
+
+from pygments import __version__, highlight
+from pygments.util import ClassNotFound, OptionError, docstring_headline, \
+ guess_decode, guess_decode_from_terminal, terminal_encoding, \
+ UnclosingTextIOWrapper
+from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
+ load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
+from pygments.lexers.special import TextLexer
+from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
+from pygments.formatters import get_all_formatters, get_formatter_by_name, \
+ load_formatter_from_file, get_formatter_for_filename, find_formatter_class
+from pygments.formatters.terminal import TerminalFormatter
+from pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
+from pygments.filters import get_all_filters, find_filter_class
+from pygments.styles import get_all_styles, get_style_by_name
+
+
+def _parse_options(o_strs):
+ opts = {}
+ if not o_strs:
+ return opts
+ for o_str in o_strs:
+ if not o_str.strip():
+ continue
+ o_args = o_str.split(',')
+ for o_arg in o_args:
+ o_arg = o_arg.strip()
+ try:
+ o_key, o_val = o_arg.split('=', 1)
+ o_key = o_key.strip()
+ o_val = o_val.strip()
+ except ValueError:
+ opts[o_arg] = True
+ else:
+ opts[o_key] = o_val
+ return opts
+
+
+def _parse_filters(f_strs):
+ filters = []
+ if not f_strs:
+ return filters
+ for f_str in f_strs:
+ if ':' in f_str:
+ fname, fopts = f_str.split(':', 1)
+ filters.append((fname, _parse_options([fopts])))
+ else:
+ filters.append((f_str, {}))
+ return filters
+
+
def _print_help(what, name):
    """Print the docstring of the lexer/formatter/filter called *name*.

    Returns 0 on success and 1 if the object could not be found.  An
    unrecognized *what* falls through all branches, prints nothing, and
    still returns 0.
    """
    try:
        if what == 'lexer':
            cls = get_lexer_by_name(name)
            print("Help on the %s lexer:" % cls.name)
            print(dedent(cls.__doc__))
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print("Help on the %s formatter:" % cls.name)
            print(dedent(cls.__doc__))
        elif what == 'filter':
            cls = find_filter_class(name)
            print("Help on the %s filter:" % name)
            print(dedent(cls.__doc__))
        return 0
    except (AttributeError, ValueError):
        # Failed lookups arrive here as ValueError (presumably ClassNotFound
        # is a ValueError subclass -- verify in pygments.util) or, for
        # filters, as AttributeError from ``None.__doc__``.
        print("%s not found!" % what, file=sys.stderr)
        return 1
+
+
def _print_list(what):
    """Print a human-readable listing of every known object of category
    *what* (``'lexer'``, ``'formatter'``, ``'filter'`` or ``'style'``)
    to stdout.  Unknown categories print nothing."""
    if what == 'lexer':
        print()
        print("Lexers:")
        print("~~~~~~~")

        info = []
        for fullname, names, exts, _ in get_all_lexers():
            # alias list, full name, optional filename patterns
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print(('* %s\n    %s %s') % i)

    elif what == 'formatter':
        print()
        print("Formatters:")
        print("~~~~~~~~~~~")

        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print(('* %s\n    %s %s') % i)

    elif what == 'filter':
        print()
        print("Filters:")
        print("~~~~~~~~")

        for name in get_all_filters():
            cls = find_filter_class(name)
            print("* " + name + ':')
            print("    %s" % docstring_headline(cls))

    elif what == 'style':
        print()
        print("Styles:")
        print("~~~~~~~")

        for name in get_all_styles():
            cls = get_style_by_name(name)
            print("* " + name + ':')
            print("    %s" % docstring_headline(cls))
+
+
def _print_list_as_json(requested_items):
    """Dump metadata for the requested categories (lexers, formatters,
    filters, styles) to stdout as a single JSON object."""
    import json
    result = {}
    if 'lexer' in requested_items:
        result['lexers'] = {
            fullname: {
                'aliases': names,
                'filenames': filenames,
                'mimetypes': mimetypes
            }
            for fullname, names, filenames, mimetypes in get_all_lexers()
        }

    if 'formatter' in requested_items:
        result['formatters'] = {
            cls.name: {
                'aliases': cls.aliases,
                'filenames': cls.filenames,
                'doc': docstring_headline(cls)
            }
            for cls in get_all_formatters()
        }

    if 'filter' in requested_items:
        result['filters'] = {
            name: {'doc': docstring_headline(find_filter_class(name))}
            for name in get_all_filters()
        }

    if 'style' in requested_items:
        result['styles'] = {
            name: {'doc': docstring_headline(get_style_by_name(name))}
            for name in get_all_styles()
        }

    json.dump(result, sys.stdout)
+
def main_inner(parser, argns):
    """Implement the CLI behavior for already-parsed arguments *argns*.

    Returns a process exit code: 0 on success, 1 on an error that was
    reported, 2 on a usage error (help text printed to stderr).
    """
    if argns.help:
        parser.print_help()
        return 0

    if argns.V:
        print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus '
              'Chajdas and contributors.' % __version__)
        return 0

    def is_only_option(opt):
        # True when *opt* is the only argument the user actually supplied.
        return not any(v for (k, v) in vars(argns).items() if k != opt)

    # handle ``pygmentize -L``
    if argns.L is not None:
        # -L tolerates only --json as a companion option.
        arg_set = set()
        for k, v in vars(argns).items():
            if v:
                arg_set.add(k)

        arg_set.discard('L')
        arg_set.discard('json')

        if arg_set:
            parser.print_help(sys.stderr)
            return 2

        # print version
        if not argns.json:
            main(['', '-V'])
        allowed_types = {'lexer', 'formatter', 'filter', 'style'}
        # accept both singular and plural forms ("styles" -> "style")
        largs = [arg.rstrip('s') for arg in argns.L]
        if any(arg not in allowed_types for arg in largs):
            parser.print_help(sys.stderr)
            return 0
        if not largs:
            largs = allowed_types
        if not argns.json:
            for arg in largs:
                _print_list(arg)
        else:
            _print_list_as_json(largs)
        return 0

    # handle ``pygmentize -H``
    if argns.H:
        if not is_only_option('H'):
            parser.print_help(sys.stderr)
            return 2
        what, name = argns.H
        if what not in ('lexer', 'formatter', 'filter'):
            parser.print_help(sys.stderr)
            return 2
        return _print_help(what, name)

    # parse -O options
    parsed_opts = _parse_options(argns.O or [])

    # parse -P options (single key=value pairs, may contain commas)
    for p_opt in argns.P or []:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value

    # encodings: specific in/out settings override the generic one
    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))

    # handle ``pygmentize -N``
    if argns.N:
        lexer = find_lexer_class_for_filename(argns.N)
        if lexer is None:
            lexer = TextLexer

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -C``
    if argns.C:
        inp = sys.stdin.buffer.read()
        try:
            lexer = guess_lexer(inp, inencoding=inencoding)
        except ClassNotFound:
            lexer = TextLexer

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -S``
    S_opt = argns.S
    a_opt = argns.a
    if S_opt is not None:
        f_opt = argns.f
        if not f_opt:
            parser.print_help(sys.stderr)
            return 2
        if argns.l or argns.INPUTFILE:
            parser.print_help(sys.stderr)
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound as err:
            print(err, file=sys.stderr)
            return 1

        print(fmter.get_style_defs(a_opt or ''))
        return 0

    # if no -S is given, -a is not allowed
    if argns.a is not None:
        parser.print_help(sys.stderr)
        return 2

    # parse -F options
    F_opts = _parse_filters(argns.F or [])

    # -x: allow custom (eXternal) lexers and formatters
    allow_custom_lexer_formatter = bool(argns.x)

    # select lexer
    lexer = None

    # given by name?
    lexername = argns.l
    if lexername:
        # custom lexer, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in lexername:
            try:
                filename = None
                name = None
                if ':' in lexername:
                    filename, name = lexername.rsplit(':', 1)

                    if '.py' in name:
                        # This can happen on Windows: If the lexername is
                        # C:\lexer.py -- return to normal load path in that case
                        name = None

                if filename and name:
                    lexer = load_lexer_from_file(filename, name,
                                                 **parsed_opts)
                else:
                    lexer = load_lexer_from_file(lexername, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                lexer = get_lexer_by_name(lexername, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    # read input code
    code = None

    if argns.INPUTFILE:
        if argns.s:
            print('Error: -s option not usable when input file specified',
                  file=sys.stderr)
            return 2

        infn = argns.INPUTFILE
        try:
            with open(infn, 'rb') as infp:
                code = infp.read()
        except Exception as err:
            print('Error: cannot read infile:', err, file=sys.stderr)
            return 1
        if not inencoding:
            code, inencoding = guess_decode(code)

        # do we have to guess the lexer?
        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound as err:
                if argns.g:
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print('Error:', err, file=sys.stderr)
                    return 1
            except OptionError as err:
                print('Error:', err, file=sys.stderr)
                return 1

    elif not argns.s:  # treat stdin as full file (-s support is later)
        # read code from terminal, always in binary mode since we want to
        # decode ourselves and be tolerant with it
        code = sys.stdin.buffer.read()  # use .buffer to get a binary stream
        if not inencoding:
            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
            # else the lexer will do the decoding
        if not lexer:
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)

    else:  # -s option needs a lexer with -l
        if not lexer:
            print('Error: when using -s a lexer has to be selected with -l',
                  file=sys.stderr)
            return 2

    # process filters
    for fname, fopts in F_opts:
        try:
            lexer.add_filter(fname, **fopts)
        except ClassNotFound as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # select formatter
    outfn = argns.o
    fmter = argns.f
    if fmter:
        # custom formatter, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in fmter:
            try:
                filename = None
                name = None
                if ':' in fmter:
                    # Same logic as above for custom lexer
                    filename, name = fmter.rsplit(':', 1)

                    if '.py' in name:
                        name = None

                if filename and name:
                    fmter = load_formatter_from_file(filename, name,
                                                     **parsed_opts)
                else:
                    fmter = load_formatter_from_file(fmter, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                fmter = get_formatter_by_name(fmter, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception as err:
            print('Error: cannot open outfile:', err, file=sys.stderr)
            return 1
    else:
        if not fmter:
            # pick a terminal formatter based on the terminal's capabilities
            if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
                fmter = TerminalTrueColorFormatter(**parsed_opts)
            elif '256' in os.environ.get('TERM', ''):
                fmter = Terminal256Formatter(**parsed_opts)
            else:
                fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout.buffer

    # determine output encoding if not explicitly selected
    if not outencoding:
        if outfn:
            # output file? use lexer encoding for now (can still be None)
            fmter.encoding = inencoding
        else:
            # else use terminal encoding
            fmter.encoding = terminal_encoding(sys.stdout)

    # provide coloring under Windows, if possible
    if not outfn and sys.platform in ('win32', 'cygwin') and \
       fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover
        # unfortunately colorama doesn't support binary streams on Py3
        outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
        fmter.encoding = None
        try:
            import colorama.initialise
        except ImportError:
            pass
        else:
            outfile = colorama.initialise.wrap_stream(
                outfile, convert=None, strip=None, autoreset=False, wrap=True)

    # When using the LaTeX formatter and the option `escapeinside` is
    # specified, we need a special lexer which collects escaped text
    # before running the chosen language lexer.
    escapeinside = parsed_opts.get('escapeinside', '')
    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
        left = escapeinside[0]
        right = escapeinside[1]
        lexer = LatexEmbeddedLexer(left, right, lexer)

    # ... and do it!
    if not argns.s:
        # process whole input as per normal...
        try:
            highlight(code, lexer, fmter, outfile)
        finally:
            if outfn:
                outfile.close()
        return 0
    else:
        # line by line processing of stdin (eg: for 'tail -f')...
        try:
            while 1:
                line = sys.stdin.buffer.readline()
                if not line:
                    break
                if not inencoding:
                    line = guess_decode_from_terminal(line, sys.stdin)[0]
                highlight(line, lexer, fmter, outfile)
                if hasattr(outfile, 'flush'):
                    outfile.flush()
            return 0
        except KeyboardInterrupt:  # pragma: no cover
            return 0
        finally:
            if outfn:
                outfile.close()
+
+
class HelpFormatter(argparse.HelpFormatter):
    """argparse help formatter that sizes itself to the current terminal.

    When no explicit *width* is given, the terminal width minus a small
    margin is used; on failure the width is left to argparse's default.
    """

    def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
        if width is None:
            try:
                width = shutil.get_terminal_size().columns - 2
            except Exception:
                # keep width=None; argparse falls back to its own default
                pass
        super().__init__(prog, indent_increment, max_help_position, width)
+
+
def main(args=sys.argv):
    """
    Main command line entry point.

    *args* defaults to ``sys.argv`` (bound once at definition time);
    element 0 is skipped as the program name.  Returns a process exit
    code suitable for ``sys.exit``.
    """
    desc = "Highlight an input file and write the result to an output file."
    parser = argparse.ArgumentParser(description=desc, add_help=False,
                                     formatter_class=HelpFormatter)

    operation = parser.add_argument_group('Main operation')
    lexersel = operation.add_mutually_exclusive_group()
    lexersel.add_argument(
        '-l', metavar='LEXER',
        help='Specify the lexer to use. (Query names with -L.) If not '
        'given and -g is not present, the lexer is guessed from the filename.')
    lexersel.add_argument(
        '-g', action='store_true',
        help='Guess the lexer from the file contents, or pass through '
        'as plain text if nothing can be guessed.')
    operation.add_argument(
        '-F', metavar='FILTER[:options]', action='append',
        help='Add a filter to the token stream. (Query names with -L.) '
        'Filter options are given after a colon if necessary.')
    operation.add_argument(
        '-f', metavar='FORMATTER',
        help='Specify the formatter to use. (Query names with -L.) '
        'If not given, the formatter is guessed from the output filename, '
        'and defaults to the terminal formatter if the output is to the '
        'terminal or an unknown file extension.')
    operation.add_argument(
        '-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
        help='Give options to the lexer and formatter as a comma-separated '
        'list of key-value pairs. '
        'Example: `-O bg=light,python=cool`.')
    operation.add_argument(
        '-P', metavar='OPTION=value', action='append',
        help='Give a single option to the lexer and formatter - with this '
        'you can pass options whose value contains commas and equal signs. '
        'Example: `-P "heading=Pygments, the Python highlighter"`.')
    operation.add_argument(
        '-o', metavar='OUTPUTFILE',
        help='Where to write the output. Defaults to standard output.')

    operation.add_argument(
        'INPUTFILE', nargs='?',
        help='Where to read the input. Defaults to standard input.')

    flags = parser.add_argument_group('Operation flags')
    flags.add_argument(
        '-v', action='store_true',
        help='Print a detailed traceback on unhandled exceptions, which '
        'is useful for debugging and bug reports.')
    flags.add_argument(
        '-s', action='store_true',
        help='Process lines one at a time until EOF, rather than waiting to '
        'process the entire file. This only works for stdin, only for lexers '
        'with no line-spanning constructs, and is intended for streaming '
        'input such as you get from `tail -f`. '
        'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
    flags.add_argument(
        '-x', action='store_true',
        help='Allow custom lexers and formatters to be loaded from a .py file '
        'relative to the current working directory. For example, '
        '`-l ./customlexer.py -x`. By default, this option expects a file '
        'with a class named CustomLexer or CustomFormatter; you can also '
        'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
        'Users should be very careful not to use this option with untrusted '
        'files, because it will import and run them.')
    flags.add_argument('--json', help='Output as JSON. This can '
        'be only used in conjunction with -L.',
        default=False,
        action='store_true')

    special_modes_group = parser.add_argument_group(
        'Special modes - do not do any highlighting')
    special_modes = special_modes_group.add_mutually_exclusive_group()
    special_modes.add_argument(
        '-S', metavar='STYLE -f formatter',
        help='Print style definitions for STYLE for a formatter '
        'given with -f. The argument given by -a is formatter '
        'dependent.')
    special_modes.add_argument(
        '-L', nargs='*', metavar='WHAT',
        help='List lexers, formatters, styles or filters -- '
        'give additional arguments for the thing(s) you want to list '
        '(e.g. "styles"), or omit them to list everything.')
    special_modes.add_argument(
        '-N', metavar='FILENAME',
        help='Guess and print out a lexer name based solely on the given '
        'filename. Does not take input or highlight anything. If no specific '
        'lexer can be determined, "text" is printed.')
    special_modes.add_argument(
        '-C', action='store_true',
        help='Like -N, but print out a lexer name based solely on '
        'a given content from standard input.')
    special_modes.add_argument(
        '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
        help='Print detailed help for the object <name> of type <type>, '
        'where <type> is one of "lexer", "formatter" or "filter".')
    special_modes.add_argument(
        '-V', action='store_true',
        help='Print the package version.')
    special_modes.add_argument(
        '-h', '--help', action='store_true',
        help='Print this help.')
    special_modes_group.add_argument(
        '-a', metavar='ARG',
        help='Formatter-specific additional argument for the -S (print '
        'style sheet) mode.')

    argns = parser.parse_args(args[1:])

    try:
        return main_inner(parser, argns)
    except BrokenPipeError:
        # someone closed our stdout, e.g. by quitting a pager.
        return 0
    except Exception:
        if argns.v:
            print(file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print('An unhandled exception occurred while highlighting.',
                  file=sys.stderr)
            print('Please report the whole traceback to the issue tracker at',
                  file=sys.stderr)
            print('<https://github.com/pygments/pygments/issues>.',
                  file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print(file=sys.stderr)
            raise
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            # ('f' plus the 'File "...", line N, in fn' frame line with its
            # leading 'F' stripped yields '(file "...", line N, in fn)')
            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print(file=sys.stderr)
        print('*** Error while highlighting:', file=sys.stderr)
        print(msg, file=sys.stderr)
        print('*** If this is a bug you want to report, please rerun with -v.',
              file=sys.stderr)
        return 1
diff --git a/pygments/console.py b/pygments/console.py
new file mode 100644
index 0000000..2ada68e
--- /dev/null
+++ b/pygments/console.py
@@ -0,0 +1,70 @@
+"""
+ pygments.console
+ ~~~~~~~~~~~~~~~~
+
+ Format colored console output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
# ANSI escape-sequence introducer (CSI).
esc = "\x1b["

# Mapping of symbolic names -> ANSI escape sequences.
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"

codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"

dark_colors = ["black", "red", "green", "yellow", "blue",
               "magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
                "brightmagenta", "brightcyan", "white"]

# Foreground colors: dark variants get SGR codes 30-37, bright ones 90-97.
x = 30
for d, l in zip(dark_colors, light_colors):
    codes[d] = esc + "%im" % x
    codes[l] = esc + "%im" % (60 + x)
    x += 1

del d, l, x

# NOTE(review): this overwrites the "white" entry assigned by the loop
# above, so "white" renders as bold rather than bright-white.  It looks
# like deliberate legacy behavior -- confirm before changing.
codes["white"] = codes["bold"]
+
+
def reset_color():
    """Return the escape sequence that resets color and attributes."""
    return codes["reset"]
+
+
def colorize(color_key, text):
    """Wrap *text* in the escape sequence named *color_key*, then reset."""
    return codes[color_key] + text + codes["reset"]
+
+
def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    pieces = []
    # Strip matching marker pairs in the same order as before:
    # '+' (blink), then '*' (bold), then '_' (underline); each may
    # apply at most once, and they can be nested, e.g. "+*red*+".
    for marker, code_name in (('+', 'blink'), ('*', 'bold'), ('_', 'underline')):
        if attr[:1] == attr[-1:] == marker:
            pieces.append(codes[code_name])
            attr = attr[1:-1]
    pieces.append(codes[attr])
    pieces.append(text)
    pieces.append(codes['reset'])
    return ''.join(pieces)
diff --git a/pygments/filter.py b/pygments/filter.py
new file mode 100644
index 0000000..e5c9664
--- /dev/null
+++ b/pygments/filter.py
@@ -0,0 +1,71 @@
+"""
+ pygments.filter
+ ~~~~~~~~~~~~~~~
+
+ Module that implements the default filter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
def apply_filters(stream, filters, lexer=None):
    """
    Apply an iterable of filters to a token stream and return the
    (lazily) filtered stream.  If *lexer* is given it is forwarded to
    each filter, otherwise the filters receive ``None``.
    """
    def wrapped(flt, tokens):
        # Generator wrapper so each filter is chained lazily, in order.
        yield from flt.filter(lexer, tokens)

    current = stream
    for flt in filters:
        current = wrapped(flt, current)
    return current
+
+
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(self, lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    # Build a FunctionFilter subclass on the fly, named after the
    # decorated function and carrying it as the ``function`` attribute.
    namespace = {
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
        'function': f,
    }
    return type(f.__name__, (FunctionFilter,), namespace)
+
+
class Filter:
    """
    Base class for token-stream filters.  Subclass it (or use the
    `simplefilter` decorator) to create your own filters.
    """

    def __init__(self, **options):
        # Keyword options are stored verbatim for subclasses to consume.
        self.options = options

    def filter(self, lexer, stream):
        # Subclasses must override this to yield (tokentype, value) pairs.
        raise NotImplementedError()
+
+
class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    # Bound to the wrapped function in subclasses created by `simplefilter`.
    function = None

    def __init__(self, **options):
        # NOTE(review): because ``function = None`` exists as a class
        # attribute, hasattr() is always true here and this guard can never
        # fire -- confirm whether ``self.function is None`` was intended.
        if not hasattr(self, 'function'):
            raise TypeError('%r used without bound function' %
                            self.__class__.__name__)
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable=not-callable
        yield from self.function(lexer, stream, self.options)
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
new file mode 100644
index 0000000..4e5c53f
--- /dev/null
+++ b/pygments/filters/__init__.py
@@ -0,0 +1,940 @@
+"""
+ pygments.filters
+ ~~~~~~~~~~~~~~~~
+
+ Module containing filter lookup functions and default
+ filters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
+ string_to_tokentype
+from pygments.filter import Filter
+from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
+ get_choice_opt, ClassNotFound, OptionError
+from pygments.plugin import find_plugin_filters
+
+
def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    # Built-in filters take precedence over plugin-provided ones.
    try:
        return FILTERS[filtername]
    except KeyError:
        pass
    for plugin_name, plugin_cls in find_plugin_filters():
        if plugin_name == filtername:
            return plugin_cls
    return None
+
+
def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Options are passed to the filter initializer if wanted.
    Raise a ClassNotFound if not found.
    """
    filter_cls = find_filter_class(filtername)
    if filter_cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return filter_cls(**options)
+
+
def get_all_filters():
    """Return a generator of all filter names."""
    # Built-in names first, then any names contributed by plugins.
    yield from FILTERS
    yield from (name for name, _cls in find_plugin_filters())
+
+
+def _replace_special(ttype, value, regex, specialttype,
+ replacefunc=lambda x: x):
+ last = 0
+ for match in regex.finditer(value):
+ start, end = match.start(), match.end()
+ if start != last:
+ yield ttype, value[last:start]
+ yield specialttype, replacefunc(value[start:end])
+ last = end
+ if last != len(value):
+ yield ttype, value[last:]
+
+
class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        A list of strings that are flagged as code tags. The default is to
        highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.

    .. versionchanged:: 2.13
       Now recognizes ``FIXME`` by default.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
        # Empty entries are dropped; tags are escaped so they match literally.
        escaped = [re.escape(tag) for tag in tags if tag]
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join(escaped))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            # Tags are highlighted inside docstrings and inside comments
            # other than preprocessor comments.
            in_docstring = ttype in String.Doc
            in_plain_comment = (ttype in Comment and
                                ttype not in Comment.Preproc)
            if in_docstring or in_plain_comment:
                yield from _replace_special(ttype, value, regex,
                                            Comment.Special)
            else:
                yield ttype, value
+
+
+class SymbolFilter(Filter):
+ """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
+ or \\longrightarrow in LaTeX into Unicode characters.
+
+ This is mostly useful for HTML or console output when you want to
+ approximate the source rendering you'd see in an IDE.
+
+ Options accepted:
+
+ `lang` : string
+ The symbol language. Must be one of ``'isabelle'`` or
+ ``'latex'``. The default is ``'isabelle'``.
+ """
+
+ latex_symbols = {
+ '\\alpha' : '\U000003b1',
+ '\\beta' : '\U000003b2',
+ '\\gamma' : '\U000003b3',
+ '\\delta' : '\U000003b4',
+ '\\varepsilon' : '\U000003b5',
+ '\\zeta' : '\U000003b6',
+ '\\eta' : '\U000003b7',
+ '\\vartheta' : '\U000003b8',
+ '\\iota' : '\U000003b9',
+ '\\kappa' : '\U000003ba',
+ '\\lambda' : '\U000003bb',
+ '\\mu' : '\U000003bc',
+ '\\nu' : '\U000003bd',
+ '\\xi' : '\U000003be',
+ '\\pi' : '\U000003c0',
+ '\\varrho' : '\U000003c1',
+ '\\sigma' : '\U000003c3',
+ '\\tau' : '\U000003c4',
+ '\\upsilon' : '\U000003c5',
+ '\\varphi' : '\U000003c6',
+ '\\chi' : '\U000003c7',
+ '\\psi' : '\U000003c8',
+ '\\omega' : '\U000003c9',
+ '\\Gamma' : '\U00000393',
+ '\\Delta' : '\U00000394',
+ '\\Theta' : '\U00000398',
+ '\\Lambda' : '\U0000039b',
+ '\\Xi' : '\U0000039e',
+ '\\Pi' : '\U000003a0',
+ '\\Sigma' : '\U000003a3',
+ '\\Upsilon' : '\U000003a5',
+ '\\Phi' : '\U000003a6',
+ '\\Psi' : '\U000003a8',
+ '\\Omega' : '\U000003a9',
+ '\\leftarrow' : '\U00002190',
+ '\\longleftarrow' : '\U000027f5',
+ '\\rightarrow' : '\U00002192',
+ '\\longrightarrow' : '\U000027f6',
+ '\\Leftarrow' : '\U000021d0',
+ '\\Longleftarrow' : '\U000027f8',
+ '\\Rightarrow' : '\U000021d2',
+ '\\Longrightarrow' : '\U000027f9',
+ '\\leftrightarrow' : '\U00002194',
+ '\\longleftrightarrow' : '\U000027f7',
+ '\\Leftrightarrow' : '\U000021d4',
+ '\\Longleftrightarrow' : '\U000027fa',
+ '\\mapsto' : '\U000021a6',
+ '\\longmapsto' : '\U000027fc',
+ '\\relbar' : '\U00002500',
+ '\\Relbar' : '\U00002550',
+ '\\hookleftarrow' : '\U000021a9',
+ '\\hookrightarrow' : '\U000021aa',
+ '\\leftharpoondown' : '\U000021bd',
+ '\\rightharpoondown' : '\U000021c1',
+ '\\leftharpoonup' : '\U000021bc',
+ '\\rightharpoonup' : '\U000021c0',
+ '\\rightleftharpoons' : '\U000021cc',
+ '\\leadsto' : '\U0000219d',
+ '\\downharpoonleft' : '\U000021c3',
+ '\\downharpoonright' : '\U000021c2',
+ '\\upharpoonleft' : '\U000021bf',
+ '\\upharpoonright' : '\U000021be',
+ '\\restriction' : '\U000021be',
+ '\\uparrow' : '\U00002191',
+ '\\Uparrow' : '\U000021d1',
+ '\\downarrow' : '\U00002193',
+ '\\Downarrow' : '\U000021d3',
+ '\\updownarrow' : '\U00002195',
+ '\\Updownarrow' : '\U000021d5',
+ '\\langle' : '\U000027e8',
+ '\\rangle' : '\U000027e9',
+ '\\lceil' : '\U00002308',
+ '\\rceil' : '\U00002309',
+ '\\lfloor' : '\U0000230a',
+ '\\rfloor' : '\U0000230b',
+ '\\flqq' : '\U000000ab',
+ '\\frqq' : '\U000000bb',
+ '\\bot' : '\U000022a5',
+ '\\top' : '\U000022a4',
+ '\\wedge' : '\U00002227',
+ '\\bigwedge' : '\U000022c0',
+ '\\vee' : '\U00002228',
+ '\\bigvee' : '\U000022c1',
+ '\\forall' : '\U00002200',
+ '\\exists' : '\U00002203',
+ '\\nexists' : '\U00002204',
+ '\\neg' : '\U000000ac',
+ '\\Box' : '\U000025a1',
+ '\\Diamond' : '\U000025c7',
+ '\\vdash' : '\U000022a2',
+ '\\models' : '\U000022a8',
+ '\\dashv' : '\U000022a3',
+ '\\surd' : '\U0000221a',
+ '\\le' : '\U00002264',
+ '\\ge' : '\U00002265',
+ '\\ll' : '\U0000226a',
+ '\\gg' : '\U0000226b',
+ '\\lesssim' : '\U00002272',
+ '\\gtrsim' : '\U00002273',
+ '\\lessapprox' : '\U00002a85',
+ '\\gtrapprox' : '\U00002a86',
+ '\\in' : '\U00002208',
+ '\\notin' : '\U00002209',
+ '\\subset' : '\U00002282',
+ '\\supset' : '\U00002283',
+ '\\subseteq' : '\U00002286',
+ '\\supseteq' : '\U00002287',
+ '\\sqsubset' : '\U0000228f',
+ '\\sqsupset' : '\U00002290',
+ '\\sqsubseteq' : '\U00002291',
+ '\\sqsupseteq' : '\U00002292',
+ '\\cap' : '\U00002229',
+ '\\bigcap' : '\U000022c2',
+ '\\cup' : '\U0000222a',
+ '\\bigcup' : '\U000022c3',
+ '\\sqcup' : '\U00002294',
+ '\\bigsqcup' : '\U00002a06',
+ '\\sqcap' : '\U00002293',
+ '\\Bigsqcap' : '\U00002a05',
+ '\\setminus' : '\U00002216',
+ '\\propto' : '\U0000221d',
+ '\\uplus' : '\U0000228e',
+ '\\bigplus' : '\U00002a04',
+ '\\sim' : '\U0000223c',
+ '\\doteq' : '\U00002250',
+ '\\simeq' : '\U00002243',
+ '\\approx' : '\U00002248',
+ '\\asymp' : '\U0000224d',
+ '\\cong' : '\U00002245',
+ '\\equiv' : '\U00002261',
+ '\\Join' : '\U000022c8',
+ '\\bowtie' : '\U00002a1d',
+ '\\prec' : '\U0000227a',
+ '\\succ' : '\U0000227b',
+ '\\preceq' : '\U0000227c',
+ '\\succeq' : '\U0000227d',
+ '\\parallel' : '\U00002225',
+ '\\mid' : '\U000000a6',
+ '\\pm' : '\U000000b1',
+ '\\mp' : '\U00002213',
+ '\\times' : '\U000000d7',
+ '\\div' : '\U000000f7',
+ '\\cdot' : '\U000022c5',
+ '\\star' : '\U000022c6',
+ '\\circ' : '\U00002218',
+ '\\dagger' : '\U00002020',
+ '\\ddagger' : '\U00002021',
+ '\\lhd' : '\U000022b2',
+ '\\rhd' : '\U000022b3',
+ '\\unlhd' : '\U000022b4',
+ '\\unrhd' : '\U000022b5',
+ '\\triangleleft' : '\U000025c3',
+ '\\triangleright' : '\U000025b9',
+ '\\triangle' : '\U000025b3',
+ '\\triangleq' : '\U0000225c',
+ '\\oplus' : '\U00002295',
+ '\\bigoplus' : '\U00002a01',
+ '\\otimes' : '\U00002297',
+ '\\bigotimes' : '\U00002a02',
+ '\\odot' : '\U00002299',
+ '\\bigodot' : '\U00002a00',
+ '\\ominus' : '\U00002296',
+ '\\oslash' : '\U00002298',
+ '\\dots' : '\U00002026',
+ '\\cdots' : '\U000022ef',
+ '\\sum' : '\U00002211',
+ '\\prod' : '\U0000220f',
+ '\\coprod' : '\U00002210',
+ '\\infty' : '\U0000221e',
+ '\\int' : '\U0000222b',
+ '\\oint' : '\U0000222e',
+ '\\clubsuit' : '\U00002663',
+ '\\diamondsuit' : '\U00002662',
+ '\\heartsuit' : '\U00002661',
+ '\\spadesuit' : '\U00002660',
+ '\\aleph' : '\U00002135',
+ '\\emptyset' : '\U00002205',
+ '\\nabla' : '\U00002207',
+ '\\partial' : '\U00002202',
+ '\\flat' : '\U0000266d',
+ '\\natural' : '\U0000266e',
+ '\\sharp' : '\U0000266f',
+ '\\angle' : '\U00002220',
+ '\\copyright' : '\U000000a9',
+ '\\textregistered' : '\U000000ae',
+ '\\textonequarter' : '\U000000bc',
+ '\\textonehalf' : '\U000000bd',
+ '\\textthreequarters' : '\U000000be',
+ '\\textordfeminine' : '\U000000aa',
+ '\\textordmasculine' : '\U000000ba',
+ '\\euro' : '\U000020ac',
+ '\\pounds' : '\U000000a3',
+ '\\yen' : '\U000000a5',
+ '\\textcent' : '\U000000a2',
+ '\\textcurrency' : '\U000000a4',
+ '\\textdegree' : '\U000000b0',
+ }
+
+ isabelle_symbols = {
+ '\\<zero>' : '\U0001d7ec',
+ '\\<one>' : '\U0001d7ed',
+ '\\<two>' : '\U0001d7ee',
+ '\\<three>' : '\U0001d7ef',
+ '\\<four>' : '\U0001d7f0',
+ '\\<five>' : '\U0001d7f1',
+ '\\<six>' : '\U0001d7f2',
+ '\\<seven>' : '\U0001d7f3',
+ '\\<eight>' : '\U0001d7f4',
+ '\\<nine>' : '\U0001d7f5',
+ '\\<A>' : '\U0001d49c',
+ '\\<B>' : '\U0000212c',
+ '\\<C>' : '\U0001d49e',
+ '\\<D>' : '\U0001d49f',
+ '\\<E>' : '\U00002130',
+ '\\<F>' : '\U00002131',
+ '\\<G>' : '\U0001d4a2',
+ '\\<H>' : '\U0000210b',
+ '\\<I>' : '\U00002110',
+ '\\<J>' : '\U0001d4a5',
+ '\\<K>' : '\U0001d4a6',
+ '\\<L>' : '\U00002112',
+ '\\<M>' : '\U00002133',
+ '\\<N>' : '\U0001d4a9',
+ '\\<O>' : '\U0001d4aa',
+ '\\<P>' : '\U0001d4ab',
+ '\\<Q>' : '\U0001d4ac',
+ '\\<R>' : '\U0000211b',
+ '\\<S>' : '\U0001d4ae',
+ '\\<T>' : '\U0001d4af',
+ '\\<U>' : '\U0001d4b0',
+ '\\<V>' : '\U0001d4b1',
+ '\\<W>' : '\U0001d4b2',
+ '\\<X>' : '\U0001d4b3',
+ '\\<Y>' : '\U0001d4b4',
+ '\\<Z>' : '\U0001d4b5',
+ '\\<a>' : '\U0001d5ba',
+ '\\<b>' : '\U0001d5bb',
+ '\\<c>' : '\U0001d5bc',
+ '\\<d>' : '\U0001d5bd',
+ '\\<e>' : '\U0001d5be',
+ '\\<f>' : '\U0001d5bf',
+ '\\<g>' : '\U0001d5c0',
+ '\\<h>' : '\U0001d5c1',
+ '\\<i>' : '\U0001d5c2',
+ '\\<j>' : '\U0001d5c3',
+ '\\<k>' : '\U0001d5c4',
+ '\\<l>' : '\U0001d5c5',
+ '\\<m>' : '\U0001d5c6',
+ '\\<n>' : '\U0001d5c7',
+ '\\<o>' : '\U0001d5c8',
+ '\\<p>' : '\U0001d5c9',
+ '\\<q>' : '\U0001d5ca',
+ '\\<r>' : '\U0001d5cb',
+ '\\<s>' : '\U0001d5cc',
+ '\\<t>' : '\U0001d5cd',
+ '\\<u>' : '\U0001d5ce',
+ '\\<v>' : '\U0001d5cf',
+ '\\<w>' : '\U0001d5d0',
+ '\\<x>' : '\U0001d5d1',
+ '\\<y>' : '\U0001d5d2',
+ '\\<z>' : '\U0001d5d3',
+ '\\<AA>' : '\U0001d504',
+ '\\<BB>' : '\U0001d505',
+ '\\<CC>' : '\U0000212d',
+ '\\<DD>' : '\U0001d507',
+ '\\<EE>' : '\U0001d508',
+ '\\<FF>' : '\U0001d509',
+ '\\<GG>' : '\U0001d50a',
+ '\\<HH>' : '\U0000210c',
+ '\\<II>' : '\U00002111',
+ '\\<JJ>' : '\U0001d50d',
+ '\\<KK>' : '\U0001d50e',
+ '\\<LL>' : '\U0001d50f',
+ '\\<MM>' : '\U0001d510',
+ '\\<NN>' : '\U0001d511',
+ '\\<OO>' : '\U0001d512',
+ '\\<PP>' : '\U0001d513',
+ '\\<QQ>' : '\U0001d514',
+ '\\<RR>' : '\U0000211c',
+ '\\<SS>' : '\U0001d516',
+ '\\<TT>' : '\U0001d517',
+ '\\<UU>' : '\U0001d518',
+ '\\<VV>' : '\U0001d519',
+ '\\<WW>' : '\U0001d51a',
+ '\\<XX>' : '\U0001d51b',
+ '\\<YY>' : '\U0001d51c',
+ '\\<ZZ>' : '\U00002128',
+ '\\<aa>' : '\U0001d51e',
+ '\\<bb>' : '\U0001d51f',
+ '\\<cc>' : '\U0001d520',
+ '\\<dd>' : '\U0001d521',
+ '\\<ee>' : '\U0001d522',
+ '\\<ff>' : '\U0001d523',
+ '\\<gg>' : '\U0001d524',
+ '\\<hh>' : '\U0001d525',
+ '\\<ii>' : '\U0001d526',
+ '\\<jj>' : '\U0001d527',
+ '\\<kk>' : '\U0001d528',
+ '\\<ll>' : '\U0001d529',
+ '\\<mm>' : '\U0001d52a',
+ '\\<nn>' : '\U0001d52b',
+ '\\<oo>' : '\U0001d52c',
+ '\\<pp>' : '\U0001d52d',
+ '\\<qq>' : '\U0001d52e',
+ '\\<rr>' : '\U0001d52f',
+ '\\<ss>' : '\U0001d530',
+ '\\<tt>' : '\U0001d531',
+ '\\<uu>' : '\U0001d532',
+ '\\<vv>' : '\U0001d533',
+ '\\<ww>' : '\U0001d534',
+ '\\<xx>' : '\U0001d535',
+ '\\<yy>' : '\U0001d536',
+ '\\<zz>' : '\U0001d537',
+ '\\<alpha>' : '\U000003b1',
+ '\\<beta>' : '\U000003b2',
+ '\\<gamma>' : '\U000003b3',
+ '\\<delta>' : '\U000003b4',
+ '\\<epsilon>' : '\U000003b5',
+ '\\<zeta>' : '\U000003b6',
+ '\\<eta>' : '\U000003b7',
+ '\\<theta>' : '\U000003b8',
+ '\\<iota>' : '\U000003b9',
+ '\\<kappa>' : '\U000003ba',
+ '\\<lambda>' : '\U000003bb',
+ '\\<mu>' : '\U000003bc',
+ '\\<nu>' : '\U000003bd',
+ '\\<xi>' : '\U000003be',
+ '\\<pi>' : '\U000003c0',
+ '\\<rho>' : '\U000003c1',
+ '\\<sigma>' : '\U000003c3',
+ '\\<tau>' : '\U000003c4',
+ '\\<upsilon>' : '\U000003c5',
+ '\\<phi>' : '\U000003c6',
+ '\\<chi>' : '\U000003c7',
+ '\\<psi>' : '\U000003c8',
+ '\\<omega>' : '\U000003c9',
+ '\\<Gamma>' : '\U00000393',
+ '\\<Delta>' : '\U00000394',
+ '\\<Theta>' : '\U00000398',
+ '\\<Lambda>' : '\U0000039b',
+ '\\<Xi>' : '\U0000039e',
+ '\\<Pi>' : '\U000003a0',
+ '\\<Sigma>' : '\U000003a3',
+ '\\<Upsilon>' : '\U000003a5',
+ '\\<Phi>' : '\U000003a6',
+ '\\<Psi>' : '\U000003a8',
+ '\\<Omega>' : '\U000003a9',
+ '\\<bool>' : '\U0001d539',
+ '\\<complex>' : '\U00002102',
+ '\\<nat>' : '\U00002115',
+ '\\<rat>' : '\U0000211a',
+ '\\<real>' : '\U0000211d',
+ '\\<int>' : '\U00002124',
+ '\\<leftarrow>' : '\U00002190',
+ '\\<longleftarrow>' : '\U000027f5',
+ '\\<rightarrow>' : '\U00002192',
+ '\\<longrightarrow>' : '\U000027f6',
+ '\\<Leftarrow>' : '\U000021d0',
+ '\\<Longleftarrow>' : '\U000027f8',
+ '\\<Rightarrow>' : '\U000021d2',
+ '\\<Longrightarrow>' : '\U000027f9',
+ '\\<leftrightarrow>' : '\U00002194',
+ '\\<longleftrightarrow>' : '\U000027f7',
+ '\\<Leftrightarrow>' : '\U000021d4',
+ '\\<Longleftrightarrow>' : '\U000027fa',
+ '\\<mapsto>' : '\U000021a6',
+ '\\<longmapsto>' : '\U000027fc',
+ '\\<midarrow>' : '\U00002500',
+ '\\<Midarrow>' : '\U00002550',
+ '\\<hookleftarrow>' : '\U000021a9',
+ '\\<hookrightarrow>' : '\U000021aa',
+ '\\<leftharpoondown>' : '\U000021bd',
+ '\\<rightharpoondown>' : '\U000021c1',
+ '\\<leftharpoonup>' : '\U000021bc',
+ '\\<rightharpoonup>' : '\U000021c0',
+ '\\<rightleftharpoons>' : '\U000021cc',
+ '\\<leadsto>' : '\U0000219d',
+ '\\<downharpoonleft>' : '\U000021c3',
+ '\\<downharpoonright>' : '\U000021c2',
+ '\\<upharpoonleft>' : '\U000021bf',
+ '\\<upharpoonright>' : '\U000021be',
+ '\\<restriction>' : '\U000021be',
+ '\\<Colon>' : '\U00002237',
+ '\\<up>' : '\U00002191',
+ '\\<Up>' : '\U000021d1',
+ '\\<down>' : '\U00002193',
+ '\\<Down>' : '\U000021d3',
+ '\\<updown>' : '\U00002195',
+ '\\<Updown>' : '\U000021d5',
+ '\\<langle>' : '\U000027e8',
+ '\\<rangle>' : '\U000027e9',
+ '\\<lceil>' : '\U00002308',
+ '\\<rceil>' : '\U00002309',
+ '\\<lfloor>' : '\U0000230a',
+ '\\<rfloor>' : '\U0000230b',
+ '\\<lparr>' : '\U00002987',
+ '\\<rparr>' : '\U00002988',
+ '\\<lbrakk>' : '\U000027e6',
+ '\\<rbrakk>' : '\U000027e7',
+ '\\<lbrace>' : '\U00002983',
+ '\\<rbrace>' : '\U00002984',
+ '\\<guillemotleft>' : '\U000000ab',
+ '\\<guillemotright>' : '\U000000bb',
+ '\\<bottom>' : '\U000022a5',
+ '\\<top>' : '\U000022a4',
+ '\\<and>' : '\U00002227',
+ '\\<And>' : '\U000022c0',
+ '\\<or>' : '\U00002228',
+ '\\<Or>' : '\U000022c1',
+ '\\<forall>' : '\U00002200',
+ '\\<exists>' : '\U00002203',
+ '\\<nexists>' : '\U00002204',
+ '\\<not>' : '\U000000ac',
+ '\\<box>' : '\U000025a1',
+ '\\<diamond>' : '\U000025c7',
+ '\\<turnstile>' : '\U000022a2',
+ '\\<Turnstile>' : '\U000022a8',
+ '\\<tturnstile>' : '\U000022a9',
+ '\\<TTurnstile>' : '\U000022ab',
+ '\\<stileturn>' : '\U000022a3',
+ '\\<surd>' : '\U0000221a',
+ '\\<le>' : '\U00002264',
+ '\\<ge>' : '\U00002265',
+ '\\<lless>' : '\U0000226a',
+ '\\<ggreater>' : '\U0000226b',
+ '\\<lesssim>' : '\U00002272',
+ '\\<greatersim>' : '\U00002273',
+ '\\<lessapprox>' : '\U00002a85',
+ '\\<greaterapprox>' : '\U00002a86',
+ '\\<in>' : '\U00002208',
+ '\\<notin>' : '\U00002209',
+ '\\<subset>' : '\U00002282',
+ '\\<supset>' : '\U00002283',
+ '\\<subseteq>' : '\U00002286',
+ '\\<supseteq>' : '\U00002287',
+ '\\<sqsubset>' : '\U0000228f',
+ '\\<sqsupset>' : '\U00002290',
+ '\\<sqsubseteq>' : '\U00002291',
+ '\\<sqsupseteq>' : '\U00002292',
+ '\\<inter>' : '\U00002229',
+ '\\<Inter>' : '\U000022c2',
+ '\\<union>' : '\U0000222a',
+ '\\<Union>' : '\U000022c3',
+ '\\<squnion>' : '\U00002294',
+ '\\<Squnion>' : '\U00002a06',
+ '\\<sqinter>' : '\U00002293',
+ '\\<Sqinter>' : '\U00002a05',
+ '\\<setminus>' : '\U00002216',
+ '\\<propto>' : '\U0000221d',
+ '\\<uplus>' : '\U0000228e',
+ '\\<Uplus>' : '\U00002a04',
+ '\\<noteq>' : '\U00002260',
+ '\\<sim>' : '\U0000223c',
+ '\\<doteq>' : '\U00002250',
+ '\\<simeq>' : '\U00002243',
+ '\\<approx>' : '\U00002248',
+ '\\<asymp>' : '\U0000224d',
+ '\\<cong>' : '\U00002245',
+ '\\<smile>' : '\U00002323',
+ '\\<equiv>' : '\U00002261',
+ '\\<frown>' : '\U00002322',
+ '\\<Join>' : '\U000022c8',
+ '\\<bowtie>' : '\U00002a1d',
+ '\\<prec>' : '\U0000227a',
+ '\\<succ>' : '\U0000227b',
+ '\\<preceq>' : '\U0000227c',
+ '\\<succeq>' : '\U0000227d',
+ '\\<parallel>' : '\U00002225',
+ '\\<bar>' : '\U000000a6',
+ '\\<plusminus>' : '\U000000b1',
+ '\\<minusplus>' : '\U00002213',
+ '\\<times>' : '\U000000d7',
+ '\\<div>' : '\U000000f7',
+ '\\<cdot>' : '\U000022c5',
+ '\\<star>' : '\U000022c6',
+ '\\<bullet>' : '\U00002219',
+ '\\<circ>' : '\U00002218',
+ '\\<dagger>' : '\U00002020',
+ '\\<ddagger>' : '\U00002021',
+ '\\<lhd>' : '\U000022b2',
+ '\\<rhd>' : '\U000022b3',
+ '\\<unlhd>' : '\U000022b4',
+ '\\<unrhd>' : '\U000022b5',
+ '\\<triangleleft>' : '\U000025c3',
+ '\\<triangleright>' : '\U000025b9',
+ '\\<triangle>' : '\U000025b3',
+ '\\<triangleq>' : '\U0000225c',
+ '\\<oplus>' : '\U00002295',
+ '\\<Oplus>' : '\U00002a01',
+ '\\<otimes>' : '\U00002297',
+ '\\<Otimes>' : '\U00002a02',
+ '\\<odot>' : '\U00002299',
+ '\\<Odot>' : '\U00002a00',
+ '\\<ominus>' : '\U00002296',
+ '\\<oslash>' : '\U00002298',
+ '\\<dots>' : '\U00002026',
+ '\\<cdots>' : '\U000022ef',
+ '\\<Sum>' : '\U00002211',
+ '\\<Prod>' : '\U0000220f',
+ '\\<Coprod>' : '\U00002210',
+ '\\<infinity>' : '\U0000221e',
+ '\\<integral>' : '\U0000222b',
+ '\\<ointegral>' : '\U0000222e',
+ '\\<clubsuit>' : '\U00002663',
+ '\\<diamondsuit>' : '\U00002662',
+ '\\<heartsuit>' : '\U00002661',
+ '\\<spadesuit>' : '\U00002660',
+ '\\<aleph>' : '\U00002135',
+ '\\<emptyset>' : '\U00002205',
+ '\\<nabla>' : '\U00002207',
+ '\\<partial>' : '\U00002202',
+ '\\<flat>' : '\U0000266d',
+ '\\<natural>' : '\U0000266e',
+ '\\<sharp>' : '\U0000266f',
+ '\\<angle>' : '\U00002220',
+ '\\<copyright>' : '\U000000a9',
+ '\\<registered>' : '\U000000ae',
+ '\\<hyphen>' : '\U000000ad',
+ '\\<inverse>' : '\U000000af',
+ '\\<onequarter>' : '\U000000bc',
+ '\\<onehalf>' : '\U000000bd',
+ '\\<threequarters>' : '\U000000be',
+ '\\<ordfeminine>' : '\U000000aa',
+ '\\<ordmasculine>' : '\U000000ba',
+ '\\<section>' : '\U000000a7',
+ '\\<paragraph>' : '\U000000b6',
+ '\\<exclamdown>' : '\U000000a1',
+ '\\<questiondown>' : '\U000000bf',
+ '\\<euro>' : '\U000020ac',
+ '\\<pounds>' : '\U000000a3',
+ '\\<yen>' : '\U000000a5',
+ '\\<cent>' : '\U000000a2',
+ '\\<currency>' : '\U000000a4',
+ '\\<degree>' : '\U000000b0',
+ '\\<amalg>' : '\U00002a3f',
+ '\\<mho>' : '\U00002127',
+ '\\<lozenge>' : '\U000025ca',
+ '\\<wp>' : '\U00002118',
+ '\\<wrong>' : '\U00002240',
+ '\\<struct>' : '\U000022c4',
+ '\\<acute>' : '\U000000b4',
+ '\\<index>' : '\U00000131',
+ '\\<dieresis>' : '\U000000a8',
+ '\\<cedilla>' : '\U000000b8',
+ '\\<hungarumlaut>' : '\U000002dd',
+ '\\<some>' : '\U000003f5',
+ '\\<newline>' : '\U000023ce',
+ '\\<open>' : '\U00002039',
+ '\\<close>' : '\U0000203a',
+ '\\<here>' : '\U00002302',
+ '\\<^sub>' : '\U000021e9',
+ '\\<^sup>' : '\U000021e7',
+ '\\<^bold>' : '\U00002759',
+ '\\<^bsub>' : '\U000021d8',
+ '\\<^esub>' : '\U000021d9',
+ '\\<^bsup>' : '\U000021d7',
+ '\\<^esup>' : '\U000021d6',
+ }
+
+ lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ lang = get_choice_opt(options, 'lang',
+ ['isabelle', 'latex'], 'isabelle')
+ self.symbols = self.lang_map[lang]
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if value in self.symbols:
+ yield ttype, self.symbols[value]
+ else:
+ yield ttype, value
+
+
class KeywordCaseFilter(Filter):
    """Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
        The casing to convert keywords to. Must be one of ``'lower'``,
        ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        case = get_choice_opt(options, 'case',
                              ['lower', 'upper', 'capitalize'], 'lower')
        # The chosen str method (e.g. str.lower) does the conversion.
        self.convert = getattr(str, case)

    def filter(self, lexer, stream):
        convert = self.convert
        for ttype, value in stream:
            if ttype in Keyword:
                yield ttype, convert(value)
            else:
                yield ttype, value
+
+
class NameHighlightFilter(Filter):
    """Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
        A list of names that should be given the different token type.
        There is no default.
    `tokentype` : TokenType or string
        A token type or a string containing a token type name that is
        used for highlighting the strings in `names`. The default is
        `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        # A string token type name is resolved; absent/falsy means the
        # Name.Function default.
        ttype_opt = options.get('tokentype')
        self.tokentype = (string_to_tokentype(ttype_opt)
                          if ttype_opt else Name.Function)

    def filter(self, lexer, stream):
        names = self.names
        highlight_type = self.tokentype
        for ttype, value in stream:
            if ttype in Name and value in names:
                yield highlight_type, value
            else:
                yield ttype, value
+
+
# Default exception raised by RaiseOnErrorTokenFilter (its ``excclass``
# option) when an Error token appears in the stream.
class ErrorToken(Exception):
    pass
+
+
class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise.
        The default is `pygments.filters.ErrorToken`.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        # The option must be an actual class derived from Exception; the
        # isinstance() check guards issubclass() against non-class values.
        if not (isinstance(self.exception, type) and
                issubclass(self.exception, Exception)):
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value
+
+
class VisibleWhitespaceFilter(Filter):
    """Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
        If it is another true value, spaces will be replaced by ``·`` (unicode
        MIDDLE DOT). If it is a false value, spaces will not be replaced. The
        default is ``False``.
    `tabs` : string or bool
        The same as for `spaces`, but the default replacement character is ``»``
        (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
        is ``False``. Note: this will not work if the `tabsize` option for the
        lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
        If tabs are to be replaced by this filter (see the `tabs` option), this
        is the total number of characters that a tab should be expanded to.
        The default is ``8``.
    `newlines` : string or bool
        The same as for `spaces`, but the default replacement character is ``¶``
        (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
        If true, give whitespace the special `Whitespace` token type. This allows
        styling the visible whitespace differently (e.g. greyed out), but it can
        disrupt background colors. The default is ``True``.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # Each of the three options may be: a one-character replacement
        # string (used as-is), any other truthy value (use the default
        # glyph), or falsy (no replacement; stored as '' so later
        # truthiness tests skip it).
        for name, default in [('spaces', '·'),
                              ('tabs', '»'),
                              ('newlines', '¶')]:
            opt = options.get(name, False)
            if isinstance(opt, str) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            # Pad the tab marker with spaces so a replaced tab still
            # occupies the configured column width.
            self.tabs += ' ' * (tabsize - 1)
        if self.newlines:
            # Keep a real newline after the marker so lines still break.
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Re-tokenize whitespace runs under the Whitespace token type;
            # kinds without a replacement map to themselves so only the
            # token type changes for them.
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            for ttype, value in stream:
                yield from _replace_special(ttype, value, regex, Whitespace,
                                            replacefunc)
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing: plain in-place substitution, token types
            # are left untouched
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
+
+
class GobbleFilter(Filter):
    """Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
        The number of characters to gobble.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        # Chop up to ``left`` characters off the front of ``value``;
        # return the remainder plus how much is still left to chop from
        # the continuation of this line in a later token.
        if left < len(value):
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        remaining = n  # characters still to drop from the current line
        for ttype, value in stream:
            # The first segment continues the previous line, so it uses
            # the carried-over budget; every following line starts a fresh
            # budget of ``n``.
            lines = value.split('\n')
            lines[0], remaining = self.gobble(lines[0], remaining)
            for idx in range(1, len(lines)):
                lines[idx], remaining = self.gobble(lines[idx], n)
            joined = '\n'.join(lines)
            if joined != '':
                yield ttype, joined
+
+
class TokenMergeFilter(Filter):
    """Merges consecutive tokens with the same token type in the output
    stream of a lexer.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        pending_type = None
        pending_text = None
        for ttype, value in stream:
            if ttype is pending_type:
                # Same type as the run being accumulated; extend it.
                pending_text += value
                continue
            if pending_type is not None:
                yield pending_type, pending_text
            pending_type, pending_text = ttype, value
        # Flush the final run, if any tokens were seen at all.
        if pending_type is not None:
            yield pending_type, pending_text
+
+
#: Registry of built-in filter names to their implementing classes;
#: consulted by find_filter_class() and get_all_filters(). Plugin filters
#: are discovered separately via find_plugin_filters().
FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
    'symbols': SymbolFilter,
}
diff --git a/pygments/formatter.py b/pygments/formatter.py
new file mode 100644
index 0000000..e1455c5
--- /dev/null
+++ b/pygments/formatter.py
@@ -0,0 +1,94 @@
+"""
+ pygments.formatter
+ ~~~~~~~~~~~~~~~~~~
+
+ Base formatter class.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import codecs
+
+from pygments.util import get_bool_opt
+from pygments.styles import get_style_by_name
+
+__all__ = ['Formatter']
+
+
+def _lookup_style(style):
+ if isinstance(style, str):
+ return get_style_by_name(style)
+ return style
+
+
class Formatter:
    """
    Converts a token stream to text.

    Options accepted:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.
    """

    #: Name of the formatter
    name = None

    #: Shortcuts for the formatter
    aliases = []

    #: fn match rules
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # Normalize '' to None so falsy encodings mean "no encoding".
        enc = options.get('encoding', None) or None
        if enc in ('guess', 'chardet'):
            # can happen for e.g. pygmentize -O encoding=guess
            enc = 'utf-8'
        # ``outencoding`` wins over ``encoding`` when both are given.
        self.encoding = options.get('outencoding') or enc
        self.options = options

    def get_style_defs(self, arg=''):
        """
        Return the style definitions for the current style as a string.

        ``arg`` is an additional argument whose meaning depends on the
        formatter used. Note that ``arg`` can also be a list or tuple
        for some formatters like the html formatter.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.
        """
        if self.encoding:
            # Wrap the outfile in an encoding StreamWriter so Unicode
            # token text is converted to bytes on the way out.
            outfile = codecs.lookup(self.encoding).streamwriter(outfile)
        return self.format_unencoded(tokensource, outfile)
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py
new file mode 100644
index 0000000..58de5fe
--- /dev/null
+++ b/pygments/formatters/__init__.py
@@ -0,0 +1,142 @@
+"""
+ pygments.formatters
+ ~~~~~~~~~~~~~~~~~~~
+
+ Pygments formatters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import types
+from fnmatch import fnmatch
+from os.path import basename
+
+from pygments.formatters._mapping import FORMATTERS
+from pygments.plugin import find_plugin_formatters
+from pygments.util import ClassNotFound
+
+__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
+ 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
+
+_formatter_cache = {} # classes by name
+
+def _load_formatters(module_name):
+ """Load a formatter (and all others in the module too)."""
+ mod = __import__(module_name, None, None, ['__all__'])
+ for formatter_name in mod.__all__:
+ cls = getattr(mod, formatter_name)
+ _formatter_cache[cls.name] = cls
+
+
+def get_all_formatters():
+ """Return a generator for all formatter classes."""
+ # NB: this returns formatter classes, not info like get_all_lexers().
+ for info in FORMATTERS.values():
+ if info[1] not in _formatter_cache:
+ _load_formatters(info[0])
+ yield _formatter_cache[info[1]]
+ for _, formatter in find_plugin_formatters():
+ yield formatter
+
+
+def find_formatter_class(alias):
+ """Lookup a formatter by alias.
+
+ Returns None if not found.
+ """
+ for module_name, name, aliases, _, _ in FORMATTERS.values():
+ if alias in aliases:
+ if name not in _formatter_cache:
+ _load_formatters(module_name)
+ return _formatter_cache[name]
+ for _, cls in find_plugin_formatters():
+ if alias in cls.aliases:
+ return cls
+
+
+def get_formatter_by_name(_alias, **options):
+ """Lookup and instantiate a formatter by alias.
+
+ Raises ClassNotFound if not found.
+ """
+ cls = find_formatter_class(_alias)
+ if cls is None:
+ raise ClassNotFound("no formatter found for name %r" % _alias)
+ return cls(**options)
+
+
+def load_formatter_from_file(filename, formattername="CustomFormatter",
+ **options):
+ """Load a formatter from a file.
+
+ This method expects a file located relative to the current working
+ directory, which contains a formatter class. By default,
+ it expects the Formatter to be named CustomFormatter; you can specify
+ your own class name as the second argument to this function.
+
+ Users should be very careful with the input, because this method
+ is equivalent to running exec on the input file.
+
+ Raises ClassNotFound if there are any problems importing the Formatter.
+
+ .. versionadded:: 2.2
+ """
+ try:
+ # This empty dict will contain the namespace for the exec'd file
+ custom_namespace = {}
+ with open(filename, 'rb') as f:
+ exec(f.read(), custom_namespace)
+ # Retrieve the class `formattername` from that namespace
+ if formattername not in custom_namespace:
+ raise ClassNotFound('no valid %s class found in %s' %
+ (formattername, filename))
+ formatter_class = custom_namespace[formattername]
+ # And finally instantiate it with the options
+ return formatter_class(**options)
+ except OSError as err:
+ raise ClassNotFound('cannot read %s: %s' % (filename, err))
+ except ClassNotFound:
+ raise
+ except Exception as err:
+ raise ClassNotFound('error when loading custom formatter: %s' % err)
+
+
+def get_formatter_for_filename(fn, **options):
+ """Lookup and instantiate a formatter by filename pattern.
+
+ Raises ClassNotFound if not found.
+ """
+ fn = basename(fn)
+ for modname, name, _, filenames, _ in FORMATTERS.values():
+ for filename in filenames:
+ if fnmatch(fn, filename):
+ if name not in _formatter_cache:
+ _load_formatters(modname)
+ return _formatter_cache[name](**options)
+ for cls in find_plugin_formatters():
+ for filename in cls.filenames:
+ if fnmatch(fn, filename):
+ return cls(**options)
+ raise ClassNotFound("no formatter found for file name %r" % fn)
+
+
+class _automodule(types.ModuleType):
+ """Automatically import formatters."""
+
+ def __getattr__(self, name):
+ info = FORMATTERS.get(name)
+ if info:
+ _load_formatters(info[0])
+ cls = _formatter_cache[info[1]]
+ setattr(self, name, cls)
+ return cls
+ raise AttributeError(name)
+
+
+oldmod = sys.modules[__name__]
+newmod = _automodule(__name__)
+newmod.__dict__.update(oldmod.__dict__)
+sys.modules[__name__] = newmod
+del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
diff --git a/pygments/formatters/_mapping.py b/pygments/formatters/_mapping.py
new file mode 100755
index 0000000..6e34f96
--- /dev/null
+++ b/pygments/formatters/_mapping.py
@@ -0,0 +1,23 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+
+FORMATTERS = {
+ 'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
+ 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
+ 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
+ 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
+ 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
+ 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
+ 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
+ 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
+ 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
+ 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
+ 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
+}
diff --git a/pygments/formatters/bbcode.py b/pygments/formatters/bbcode.py
new file mode 100644
index 0000000..8b23b35
--- /dev/null
+++ b/pygments/formatters/bbcode.py
@@ -0,0 +1,108 @@
+"""
+ pygments.formatters.bbcode
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ BBcode formatter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+from pygments.formatter import Formatter
+from pygments.util import get_bool_opt
+
+__all__ = ['BBCodeFormatter']
+
+
+class BBCodeFormatter(Formatter):
+ """
+ Format tokens with BBcodes. These formatting codes are used by many
+ bulletin boards, so you can highlight your sourcecode with pygments before
+ posting it there.
+
+ This formatter has no support for background colors and borders, as there
+ are no common BBcode tags for that.
+
+ Some board systems (e.g. phpBB) don't support colors in their [code] tag,
+ so you can't use the highlighting together with that tag.
+ Text in a [code] tag is usually shown with a monospace font (which this
+ formatter can add with the ``monofont`` option), and spaces (which you
+ need for indentation) are not removed.
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `codetag`
+ If set to true, put the output into ``[code]`` tags (default:
+ ``false``)
+
+ `monofont`
+ If set to true, add a tag to show the code with a monospace font
+ (default: ``false``).
+ """
+ name = 'BBCode'
+ aliases = ['bbcode', 'bb']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self._code = get_bool_opt(options, 'codetag', False)
+ self._mono = get_bool_opt(options, 'monofont', False)
+
+ self.styles = {}
+ self._make_styles()
+
+ def _make_styles(self):
+ for ttype, ndef in self.style:
+ start = end = ''
+ if ndef['color']:
+ start += '[color=#%s]' % ndef['color']
+ end = '[/color]' + end
+ if ndef['bold']:
+ start += '[b]'
+ end = '[/b]' + end
+ if ndef['italic']:
+ start += '[i]'
+ end = '[/i]' + end
+ if ndef['underline']:
+ start += '[u]'
+ end = '[/u]' + end
+ # there are no common BBcodes for background-color and border
+
+ self.styles[ttype] = start, end
+
+ def format_unencoded(self, tokensource, outfile):
+ if self._code:
+ outfile.write('[code]')
+ if self._mono:
+ outfile.write('[font=monospace]')
+
+ lastval = ''
+ lasttype = None
+
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ if ttype == lasttype:
+ lastval += value
+ else:
+ if lastval:
+ start, end = self.styles[lasttype]
+ outfile.write(''.join((start, lastval, end)))
+ lastval = value
+ lasttype = ttype
+
+ if lastval:
+ start, end = self.styles[lasttype]
+ outfile.write(''.join((start, lastval, end)))
+
+ if self._mono:
+ outfile.write('[/font]')
+ if self._code:
+ outfile.write('[/code]')
+ if self._code or self._mono:
+ outfile.write('\n')
diff --git a/pygments/formatters/groff.py b/pygments/formatters/groff.py
new file mode 100644
index 0000000..7d409ba
--- /dev/null
+++ b/pygments/formatters/groff.py
@@ -0,0 +1,170 @@
+"""
+ pygments.formatters.groff
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for groff output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import math
+from pygments.formatter import Formatter
+from pygments.util import get_bool_opt, get_int_opt
+
+__all__ = ['GroffFormatter']
+
+
+class GroffFormatter(Formatter):
+ """
+ Format tokens with groff escapes to change their color and font style.
+
+ .. versionadded:: 2.11
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `monospaced`
+ If set to true, monospace font will be used (default: ``true``).
+
+ `linenos`
+ If set to true, print the line numbers (default: ``false``).
+
+ `wrap`
+ Wrap lines to the specified number of characters. Disabled if set to 0
+ (default: ``0``).
+ """
+
+ name = 'groff'
+ aliases = ['groff','troff','roff']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+
+ self.monospaced = get_bool_opt(options, 'monospaced', True)
+ self.linenos = get_bool_opt(options, 'linenos', False)
+ self._lineno = 0
+ self.wrap = get_int_opt(options, 'wrap', 0)
+ self._linelen = 0
+
+ self.styles = {}
+ self._make_styles()
+
+
+ def _make_styles(self):
+ regular = '\\f[CR]' if self.monospaced else '\\f[R]'
+ bold = '\\f[CB]' if self.monospaced else '\\f[B]'
+ italic = '\\f[CI]' if self.monospaced else '\\f[I]'
+
+ for ttype, ndef in self.style:
+ start = end = ''
+ if ndef['color']:
+ start += '\\m[%s]' % ndef['color']
+ end = '\\m[]' + end
+ if ndef['bold']:
+ start += bold
+ end = regular + end
+ if ndef['italic']:
+ start += italic
+ end = regular + end
+ if ndef['bgcolor']:
+ start += '\\M[%s]' % ndef['bgcolor']
+ end = '\\M[]' + end
+
+ self.styles[ttype] = start, end
+
+
+ def _define_colors(self, outfile):
+ colors = set()
+ for _, ndef in self.style:
+ if ndef['color'] is not None:
+ colors.add(ndef['color'])
+
+ for color in colors:
+ outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')
+
+
+ def _write_lineno(self, outfile):
+ self._lineno += 1
+ outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))
+
+
+ def _wrap_line(self, line):
+ length = len(line.rstrip('\n'))
+ space = ' ' if self.linenos else ''
+ newline = ''
+
+ if length > self.wrap:
+ for i in range(0, math.floor(length / self.wrap)):
+ chunk = line[i*self.wrap:i*self.wrap+self.wrap]
+ newline += (chunk + '\n' + space)
+ remainder = length % self.wrap
+ if remainder > 0:
+ newline += line[-remainder-1:]
+ self._linelen = remainder
+ elif self._linelen + length > self.wrap:
+ newline = ('\n' + space) + line
+ self._linelen = length
+ else:
+ newline = line
+ self._linelen += length
+
+ return newline
+
+
+ def _escape_chars(self, text):
+ text = text.replace('\\', '\\[u005C]'). \
+ replace('.', '\\[char46]'). \
+ replace('\'', '\\[u0027]'). \
+ replace('`', '\\[u0060]'). \
+ replace('~', '\\[u007E]')
+ copy = text
+
+ for char in copy:
+ if len(char) != len(char.encode()):
+ uni = char.encode('unicode_escape') \
+ .decode()[1:] \
+ .replace('x', 'u00') \
+ .upper()
+ text = text.replace(char, '\\[u' + uni[1:] + ']')
+
+ return text
+
+
+ def format_unencoded(self, tokensource, outfile):
+ self._define_colors(outfile)
+
+ outfile.write('.nf\n\\f[CR]\n')
+
+ if self.linenos:
+ self._write_lineno(outfile)
+
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ start, end = self.styles[ttype]
+
+ for line in value.splitlines(True):
+ if self.wrap > 0:
+ line = self._wrap_line(line)
+
+ if start and end:
+ text = self._escape_chars(line.rstrip('\n'))
+ if text != '':
+ outfile.write(''.join((start, text, end)))
+ else:
+ outfile.write(self._escape_chars(line.rstrip('\n')))
+
+ if line.endswith('\n'):
+ if self.linenos:
+ self._write_lineno(outfile)
+ self._linelen = 0
+ else:
+ outfile.write('\n')
+ self._linelen = 0
+
+ outfile.write('\n.fi')
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
new file mode 100644
index 0000000..ee53f13
--- /dev/null
+++ b/pygments/formatters/html.py
@@ -0,0 +1,991 @@
+"""
+ pygments.formatters.html
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for HTML output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import functools
+import os
+import sys
+import os.path
+from io import StringIO
+
+from pygments.formatter import Formatter
+from pygments.token import Token, Text, STANDARD_TYPES
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt
+
+try:
+ import ctags
+except ImportError:
+ ctags = None
+
+__all__ = ['HtmlFormatter']
+
+
+_escape_html_table = {
+ ord('&'): '&amp;',
+ ord('<'): '&lt;',
+ ord('>'): '&gt;',
+ ord('"'): '&quot;',
+ ord("'"): '&#39;',
+}
+
+
+def escape_html(text, table=_escape_html_table):
+ """Escape &, <, > as well as single and double quotes for HTML."""
+ return text.translate(table)
+
+
+def webify(color):
+ if color.startswith('calc') or color.startswith('var'):
+ return color
+ else:
+ return '#' + color
+
+
+def _get_ttype_class(ttype):
+ fname = STANDARD_TYPES.get(ttype)
+ if fname:
+ return fname
+ aname = ''
+ while fname is None:
+ aname = '-' + ttype[-1] + aname
+ ttype = ttype.parent
+ fname = STANDARD_TYPES.get(ttype)
+ return fname + aname
+
+
+CSSFILE_TEMPLATE = '''\
+/*
+generated by Pygments <https://pygments.org/>
+Copyright 2006-2022 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+*/
+%(styledefs)s
+'''
+
+DOC_HEADER = '''\
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<!--
+generated by Pygments <https://pygments.org/>
+Copyright 2006-2022 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+-->
+<html>
+<head>
+ <title>%(title)s</title>
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
+ <style type="text/css">
+''' + CSSFILE_TEMPLATE + '''
+ </style>
+</head>
+<body>
+<h2>%(title)s</h2>
+
+'''
+
+DOC_HEADER_EXTERNALCSS = '''\
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>%(title)s</title>
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
+ <link rel="stylesheet" href="%(cssfile)s" type="text/css">
+</head>
+<body>
+<h2>%(title)s</h2>
+
+'''
+
+DOC_FOOTER = '''\
+</body>
+</html>
+'''
+
+
+class HtmlFormatter(Formatter):
+ r"""
+ Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
+ in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
+ option.
+
+ If the `linenos` option is set to ``"table"``, the ``<pre>`` is
+ additionally wrapped inside a ``<table>`` which has one row and two
+ cells: one containing the line numbers and one containing the code.
+ Example:
+
+ .. sourcecode:: html
+
+ <div class="highlight" >
+ <table><tr>
+ <td class="linenos" title="click to toggle"
+ onclick="with (this.firstChild.style)
+ { display = (display == '') ? 'none' : '' }">
+ <pre>1
+ 2</pre>
+ </td>
+ <td class="code">
+ <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
+ <span class="Ke">pass</span>
+ </pre>
+ </td>
+ </tr></table></div>
+
+ (whitespace added to improve clarity).
+
+ Wrapping can be disabled using the `nowrap` option.
+
+ A list of lines can be specified using the `hl_lines` option to make these
+ lines highlighted (as of Pygments 0.11).
+
+ With the `full` option, a complete HTML 4 document is output, including
+ the style definitions inside a ``<style>`` tag, or in a separate file if
+ the `cssfile` option is given.
+
+ When `tagsfile` is set to the path of a ctags index file, it is used to
+ generate hyperlinks from names to their definition. You must enable
+ `lineanchors` and run ctags with the `-n` option for this to work. The
+ `python-ctags` module from PyPI must be installed to use this feature;
+ otherwise a `RuntimeError` will be raised.
+
+ The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
+ containing CSS rules for the CSS classes used by the formatter. The
+ argument `arg` can be used to specify additional CSS selectors that
+ are prepended to the classes. A call `fmter.get_style_defs('td .code')`
+ would result in the following CSS classes:
+
+ .. sourcecode:: css
+
+ td .code .kw { font-weight: bold; color: #00FF00 }
+ td .code .cm { color: #999999 }
+ ...
+
+ If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
+ `get_style_defs()` method to request multiple prefixes for the tokens:
+
+ .. sourcecode:: python
+
+ formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
+
+ The output would then look like this:
+
+ .. sourcecode:: css
+
+ div.syntax pre .kw,
+ pre.syntax .kw { font-weight: bold; color: #00FF00 }
+ div.syntax pre .cm,
+ pre.syntax .cm { color: #999999 }
+ ...
+
+ Additional options accepted:
+
+ `nowrap`
+ If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
+ tag. This disables most other options (default: ``False``).
+
+ `full`
+ Tells the formatter to output a "full" document, i.e. a complete
+ self-contained document (default: ``False``).
+
+ `title`
+ If `full` is true, the title that should be used to caption the
+ document (default: ``''``).
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``). This option has no effect if the `cssfile`
+ and `noclobber_cssfile` option are given and the file specified in
+ `cssfile` exists.
+
+ `noclasses`
+ If set to true, token ``<span>`` tags (as well as line number elements)
+ will not use CSS classes, but inline styles. This is not recommended
+ for larger pieces of code since it increases output size by quite a bit
+ (default: ``False``).
+
+ `classprefix`
+ Since the token types use relatively short class names, they may clash
+ with some of your own class names. In this case you can use the
+ `classprefix` option to give a string to prepend to all Pygments-generated
+ CSS class names for token types.
+ Note that this option also affects the output of `get_style_defs()`.
+
+ `cssclass`
+ CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
+ If you set this option, the default selector for `get_style_defs()`
+ will be this class.
+
+ .. versionadded:: 0.9
+ If you select the ``'table'`` line numbers, the wrapping table will
+ have a CSS class of this string plus ``'table'``, the default is
+ accordingly ``'highlighttable'``.
+
+ `cssstyles`
+ Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
+
+ `prestyles`
+ Inline CSS styles for the ``<pre>`` tag (default: ``''``).
+
+ .. versionadded:: 0.11
+
+ `cssfile`
+ If the `full` option is true and this option is given, it must be the
+ name of an external file. If the filename does not include an absolute
+ path, the file's path will be assumed to be relative to the main output
+ file's path, if the latter can be found. The stylesheet is then written
+ to this file instead of the HTML file.
+
+ .. versionadded:: 0.6
+
+ `noclobber_cssfile`
+ If `cssfile` is given and the specified file exists, the css file will
+ not be overwritten. This allows the use of the `full` option in
+ combination with a user specified css file. Default is ``False``.
+
+ .. versionadded:: 1.1
+
+ `linenos`
+ If set to ``'table'``, output line numbers as a table with two cells,
+ one containing the line numbers, the other the whole code. This is
+ copy-and-paste-friendly, but may cause alignment problems with some
+ browsers or fonts. If set to ``'inline'``, the line numbers will be
+ integrated in the ``<pre>`` tag that contains the code (that setting
+ is *new in Pygments 0.8*).
+
+ For compatibility with Pygments 0.7 and earlier, every true value
+ except ``'inline'`` means the same as ``'table'`` (in particular, that
+ means also ``True``).
+
+ The default value is ``False``, which means no line numbers at all.
+
+ **Note:** with the default ("table") line number mechanism, the line
+ numbers and code can have different line heights in Internet Explorer
+ unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
+ CSS property (you get the default line spacing with ``line-height:
+ 125%``).
+
+ `hl_lines`
+ Specify a list of lines to be highlighted. The line numbers are always
+ relative to the input (i.e. the first line is line 1) and are
+ independent of `linenostart`.
+
+ .. versionadded:: 0.11
+
+ `linenostart`
+ The line number for the first line (default: ``1``).
+
+ `linenostep`
+ If set to a number n > 1, only every nth line number is printed.
+
+ `linenospecial`
+ If set to a number n > 0, every nth line number is given the CSS
+ class ``"special"`` (default: ``0``).
+
+ `nobackground`
+ If set to ``True``, the formatter won't output the background color
+ for the wrapping element (this automatically defaults to ``False``
+ when there is no wrapping element [eg: no argument for the
+ `get_syntax_defs` method given]) (default: ``False``).
+
+ .. versionadded:: 0.6
+
+ `lineseparator`
+ This string is output between lines of code. It defaults to ``"\n"``,
+ which is enough to break a line inside ``<pre>`` tags, but you can
+ e.g. set it to ``"<br>"`` to get HTML line breaks.
+
+ .. versionadded:: 0.7
+
+ `lineanchors`
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
+ output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
+ This allows easy linking to certain lines.
+
+ .. versionadded:: 0.9
+
+ `linespans`
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
+ output line in a span tag with an ``id`` of ``foo-linenumber``.
+ This allows easy access to lines via javascript.
+
+ .. versionadded:: 1.6
+
+ `anchorlinenos`
+ If set to `True`, will wrap line numbers in <a> tags. Used in
+ combination with `linenos` and `lineanchors`.
+
+ `tagsfile`
+ If set to the path of a ctags file, wrap names in anchor tags that
+ link to their definitions. `lineanchors` should be used, and the
+ tags file should specify line numbers (see the `-n` option to ctags).
+
+ .. versionadded:: 1.6
+
+ `tagurlformat`
+ A string formatting pattern used to generate links to ctags definitions.
+ Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
+ Defaults to an empty string, resulting in just `#prefix-number` links.
+
+ .. versionadded:: 1.6
+
+ `filename`
+ A string used to generate a filename when rendering ``<pre>`` blocks,
+ for example if displaying source code. If `linenos` is set to
+ ``'table'`` then the filename will be rendered in an initial row
+ containing a single `<th>` which spans both columns.
+
+ .. versionadded:: 2.1
+
+ `wrapcode`
+ Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
+ by the HTML5 specification.
+
+ .. versionadded:: 2.4
+
+ `debug_token_types`
+ Add ``title`` attributes to all token ``<span>`` tags that show the
+ name of the token.
+
+ .. versionadded:: 2.10
+
+
+ **Subclassing the HTML formatter**
+
+ .. versionadded:: 0.7
+
+ The HTML formatter is now built in a way that allows easy subclassing, thus
+ customizing the output HTML code. The `format()` method calls
+ `self._format_lines()` which returns a generator that yields tuples of ``(1,
+ line)``, where the ``1`` indicates that the ``line`` is a line of the
+ formatted source code.
+
+ If the `nowrap` option is set, the generator is iterated over and the
+ resulting HTML is output.
+
+ Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
+ other generators. These may add some HTML code to the one generated by
+ `_format_lines()`, either by modifying the lines generated by the latter,
+ then yielding them again with ``(1, line)``, and/or by yielding other HTML
+ code before or after the lines, with ``(0, html)``. The distinction between
+ source lines and other code makes it possible to wrap the generator multiple
+ times.
+
+ The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
+
+ A custom `HtmlFormatter` subclass could look like this:
+
+ .. sourcecode:: python
+
+ class CodeHtmlFormatter(HtmlFormatter):
+
+ def wrap(self, source, *, include_div):
+ return self._wrap_code(source)
+
+ def _wrap_code(self, source):
+ yield 0, '<code>'
+ for i, t in source:
+ if i == 1:
+ # it's a line of formatted code
+ t += '<br>'
+ yield i, t
+ yield 0, '</code>'
+
+ This results in wrapping the formatted lines with a ``<code>`` tag, where the
+ source lines are broken using ``<br>`` tags.
+
+ After calling `wrap()`, the `format()` method also adds the "line numbers"
+ and/or "full document" wrappers if the respective options are set. Then, all
+ HTML yielded by the wrapped generator is output.
+ """
+
+ name = 'HTML'
+ aliases = ['html']
+ filenames = ['*.html', '*.htm']
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.title = self._decodeifneeded(self.title)
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
+ self.noclasses = get_bool_opt(options, 'noclasses', False)
+ self.classprefix = options.get('classprefix', '')
+ self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
+ self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
+ self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
+ self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
+ self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
+ self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
+ self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
+ self.filename = self._decodeifneeded(options.get('filename', ''))
+ self.wrapcode = get_bool_opt(options, 'wrapcode', False)
+ self.span_element_openers = {}
+ self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
+
+ if self.tagsfile:
+ if not ctags:
+ raise RuntimeError('The "ctags" package must to be installed '
+ 'to be able to use the "tagsfile" feature.')
+ self._ctags = ctags.CTags(self.tagsfile)
+
+ linenos = options.get('linenos', False)
+ if linenos == 'inline':
+ self.linenos = 2
+ elif linenos:
+ # compatibility with <= 0.7
+ self.linenos = 1
+ else:
+ self.linenos = 0
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
+ self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
+ self.nobackground = get_bool_opt(options, 'nobackground', False)
+ self.lineseparator = options.get('lineseparator', '\n')
+ self.lineanchors = options.get('lineanchors', '')
+ self.linespans = options.get('linespans', '')
+ self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
+ self.hl_lines = set()
+ for lineno in get_list_opt(options, 'hl_lines', []):
+ try:
+ self.hl_lines.add(int(lineno))
+ except ValueError:
+ pass
+
+ self._create_stylesheet()
+
+ def _get_css_class(self, ttype):
+ """Return the css class of this token type prefixed with
+ the classprefix option."""
+ ttypeclass = _get_ttype_class(ttype)
+ if ttypeclass:
+ return self.classprefix + ttypeclass
+ return ''
+
+ def _get_css_classes(self, ttype):
+ """Return the CSS classes of this token type prefixed with the classprefix option."""
+ cls = self._get_css_class(ttype)
+ while ttype not in STANDARD_TYPES:
+ ttype = ttype.parent
+ cls = self._get_css_class(ttype) + ' ' + cls
+ return cls or ''
+
+ def _get_css_inline_styles(self, ttype):
+ """Return the inline CSS styles for this token type."""
+ cclass = self.ttype2class.get(ttype)
+ while cclass is None:
+ ttype = ttype.parent
+ cclass = self.ttype2class.get(ttype)
+ return cclass or ''
+
    def _create_stylesheet(self):
        """Build the ``ttype2class`` (token type -> CSS class name) and
        ``class2style`` (class name -> inline style tuple) maps from the
        current style; only token types that carry any styling get entries."""
        t2c = self.ttype2class = {Token: ''}
        c2s = self.class2style = {}
        for ttype, ndef in self.style:
            name = self._get_css_class(ttype)
            style = ''
            if ndef['color']:
                style += 'color: %s; ' % webify(ndef['color'])
            if ndef['bold']:
                style += 'font-weight: bold; '
            if ndef['italic']:
                style += 'font-style: italic; '
            if ndef['underline']:
                style += 'text-decoration: underline; '
            if ndef['bgcolor']:
                style += 'background-color: %s; ' % webify(ndef['bgcolor'])
            if ndef['border']:
                style += 'border: 1px solid %s; ' % webify(ndef['border'])
            if style:
                t2c[ttype] = name
                # save len(ttype) to enable ordering the styles by
                # hierarchy (necessary for CSS cascading rules!)
                # style[:-2] drops the trailing '; ' separator
                c2s[name] = (style[:-2], ttype, len(ttype))
+
+ def get_style_defs(self, arg=None):
+ """
+ Return CSS style definitions for the classes produced by the current
+ highlighting style. ``arg`` can be a string or list of selectors to
+ insert before the token type classes.
+ """
+ style_lines = []
+
+ style_lines.extend(self.get_linenos_style_defs())
+ style_lines.extend(self.get_background_style_defs(arg))
+ style_lines.extend(self.get_token_style_defs(arg))
+
+ return '\n'.join(style_lines)
+
+ def get_token_style_defs(self, arg=None):
+ prefix = self.get_css_prefix(arg)
+
+ styles = [
+ (level, ttype, cls, style)
+ for cls, (style, ttype, level) in self.class2style.items()
+ if cls and style
+ ]
+ styles.sort()
+
+ lines = [
+ '%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
+ for (level, ttype, cls, style) in styles
+ ]
+
+ return lines
+
+ def get_background_style_defs(self, arg=None):
+ prefix = self.get_css_prefix(arg)
+ bg_color = self.style.background_color
+ hl_color = self.style.highlight_color
+
+ lines = []
+
+ if arg and not self.nobackground and bg_color is not None:
+ text_style = ''
+ if Text in self.ttype2class:
+ text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
+ lines.insert(
+ 0, '%s{ background: %s;%s }' % (
+ prefix(''), bg_color, text_style
+ )
+ )
+ if hl_color is not None:
+ lines.insert(
+ 0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
+ )
+
+ return lines
+
+ def get_linenos_style_defs(self):
+ lines = [
+ 'pre { %s }' % self._pre_style,
+ 'td.linenos .normal { %s }' % self._linenos_style,
+ 'span.linenos { %s }' % self._linenos_style,
+ 'td.linenos .special { %s }' % self._linenos_special_style,
+ 'span.linenos.special { %s }' % self._linenos_special_style,
+ ]
+
+ return lines
+
+ def get_css_prefix(self, arg):
+ if arg is None:
+ arg = ('cssclass' in self.options and '.'+self.cssclass or '')
+ if isinstance(arg, str):
+ args = [arg]
+ else:
+ args = list(arg)
+
+ def prefix(cls):
+ if cls:
+ cls = '.' + cls
+ tmp = []
+ for arg in args:
+ tmp.append((arg and arg + ' ' or '') + cls)
+ return ', '.join(tmp)
+
+ return prefix
+
    @property
    def _pre_style(self):
        # Inline style applied to the <pre> element when classes are not used.
        return 'line-height: 125%;'
+
    @property
    def _linenos_style(self):
        # Inline style for ordinary line numbers, built from the style's
        # line-number colors.
        return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
            self.style.line_number_color,
            self.style.line_number_background_color
        )
+
    @property
    def _linenos_special_style(self):
        # Inline style for "special" line numbers (every n-th line when the
        # linenospecial option is set).
        return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
            self.style.line_number_special_color,
            self.style.line_number_special_background_color
        )
+
+ def _decodeifneeded(self, value):
+ if isinstance(value, bytes):
+ if self.encoding:
+ return value.decode(self.encoding)
+ return value.decode()
+ return value
+
    def _wrap_full(self, inner, outfile):
        """Wrap the output in a complete HTML document (``full`` option).

        If ``cssfile`` is set, the style definitions are written to that
        external file (unless ``noclobber_cssfile`` protects an existing
        one) and the document header links to it; otherwise the styles are
        embedded in the document header.
        """
        if self.cssfile:
            if os.path.isabs(self.cssfile):
                # it's an absolute filename
                cssfilename = self.cssfile
            else:
                try:
                    filename = outfile.name
                    if not filename or filename[0] == '<':
                        # pseudo files, e.g. name == '<fdopen>'
                        raise AttributeError
                    cssfilename = os.path.join(os.path.dirname(filename),
                                               self.cssfile)
                except AttributeError:
                    print('Note: Cannot determine output file name, '
                          'using current directory as base for the CSS file name',
                          file=sys.stderr)
                    cssfilename = self.cssfile
            # write CSS file only if noclobber_cssfile isn't given as an option.
            try:
                if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
                    with open(cssfilename, "w") as cf:
                        cf.write(CSSFILE_TEMPLATE %
                                 {'styledefs': self.get_style_defs('body')})
            except OSError as err:
                err.strerror = 'Error writing CSS file: ' + err.strerror
                raise

            yield 0, (DOC_HEADER_EXTERNALCSS %
                      dict(title=self.title,
                           cssfile=self.cssfile,
                           encoding=self.encoding))
        else:
            yield 0, (DOC_HEADER %
                      dict(title=self.title,
                           styledefs=self.get_style_defs('body'),
                           encoding=self.encoding))

        yield from inner
        yield 0, DOC_FOOTER
+
    def _wrap_tablelinenos(self, inner):
        """Render the output as a two-column table: line numbers on the
        left, code on the right (``linenos='table'``).

        The inner stream is buffered in full first, because the total line
        count determines the width of the line-number column.
        """
        dummyoutfile = StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))  # width of the widest line number
        sp = self.linenospecial
        st = self.linenostep
        anchor_name = self.lineanchors or self.linespans
        aln = self.anchorlinenos
        nocls = self.noclasses

        lines = []

        for i in range(fl, fl+lncount):
            print_line = i % st == 0
            special_line = sp and i % sp == 0

            if print_line:
                line = '%*d' % (mw, i)
                if aln:
                    line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
            else:
                # skipped step: keep the column width with blanks
                line = ' ' * mw

            if nocls:
                if special_line:
                    style = ' style="%s"' % self._linenos_special_style
                else:
                    style = ' style="%s"' % self._linenos_style
            else:
                if special_line:
                    style = ' class="special"'
                else:
                    style = ' class="normal"'

            if style:
                line = '<span%s>%s</span>' % (style, line)

            lines.append(line)

        ls = '\n'.join(lines)

        # If a filename was specified, we can't put it into the code table as it
        # would misalign the line numbers. Hence we emit a separate row for it.
        filename_tr = ""
        if self.filename:
            filename_tr = (
                '<tr><th colspan="2" class="filename">'
                '<span class="filename">' + self.filename + '</span>'
                '</th></tr>')

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                  ls + '</pre></div></td><td class="code">')
        yield 0, '<div>'
        yield 0, dummyoutfile.getvalue()
        yield 0, '</div>'
        yield 0, '</td></tr></table>'
+
+
    def _wrap_inlinelinenos(self, inner):
        """Prefix every source line with its line number
        (``linenos='inline'``), honoring step/special/anchor options."""
        # need a list of lines since we need the width of a single number :(
        inner_lines = list(inner)
        sp = self.linenospecial
        st = self.linenostep
        num = self.linenostart
        mw = len(str(len(inner_lines) + num - 1))  # widest number's width
        anchor_name = self.lineanchors or self.linespans
        aln = self.anchorlinenos
        nocls = self.noclasses

        for _, inner_line in inner_lines:
            print_line = num % st == 0
            special_line = sp and num % sp == 0

            if print_line:
                line = '%*d' % (mw, num)
            else:
                # skipped step: keep the column width with blanks
                line = ' ' * mw

            if nocls:
                if special_line:
                    style = ' style="%s"' % self._linenos_special_style
                else:
                    style = ' style="%s"' % self._linenos_style
            else:
                if special_line:
                    style = ' class="linenos special"'
                else:
                    style = ' class="linenos"'

            if style:
                linenos = '<span%s>%s</span>' % (style, line)
            else:
                linenos = line

            if aln:
                yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
                          inner_line)
            else:
                yield 1, linenos + inner_line
            num += 1
+
+ def _wrap_lineanchors(self, inner):
+ s = self.lineanchors
+ # subtract 1 since we have to increment i *before* yielding
+ i = self.linenostart - 1
+ for t, line in inner:
+ if t:
+ i += 1
+ href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
+ yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
+ else:
+ yield 0, line
+
+ def _wrap_linespans(self, inner):
+ s = self.linespans
+ i = self.linenostart - 1
+ for t, line in inner:
+ if t:
+ i += 1
+ yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
+ else:
+ yield 0, line
+
+ def _wrap_div(self, inner):
+ style = []
+ if (self.noclasses and not self.nobackground and
+ self.style.background_color is not None):
+ style.append('background: %s' % (self.style.background_color,))
+ if self.cssstyles:
+ style.append(self.cssstyles)
+ style = '; '.join(style)
+
+ yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
+ (style and (' style="%s"' % style)) + '>')
+ yield from inner
+ yield 0, '</div>\n'
+
+ def _wrap_pre(self, inner):
+ style = []
+ if self.prestyles:
+ style.append(self.prestyles)
+ if self.noclasses:
+ style.append(self._pre_style)
+ style = '; '.join(style)
+
+ if self.filename and self.linenos != 1:
+ yield 0, ('<span class="filename">' + self.filename + '</span>')
+
+ # the empty span here is to keep leading empty lines from being
+ # ignored by HTML parsers
+ yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
+ yield from inner
+ yield 0, '</pre>'
+
+ def _wrap_code(self, inner):
+ yield 0, '<code>'
+ yield from inner
+ yield 0, '</code>'
+
    @functools.lru_cache(maxsize=100)
    def _translate_parts(self, value):
        """HTML-escape a value and split it by newlines."""
        # NOTE(review): lru_cache on an instance method keys on (self, value)
        # and keeps each formatter instance alive for the cache's lifetime;
        # bounded at 100 entries, so the retention window is small — confirm
        # this is acceptable.
        return value.translate(_escape_html_table).split('\n')
+
    def _format_lines(self, tokensource):
        """
        Just format the tokens, without any wrapping tags.
        Yield individual lines.

        Yields ``(1, line_html)`` tuples.  Consecutive tokens with the same
        style share a single ``<span>`` where possible: ``lspan`` tracks the
        span opener left unclosed from the previous token, ``line`` collects
        the HTML pieces of the line being built.
        """
        nocls = self.noclasses
        lsep = self.lineseparator
        tagsfile = self.tagsfile

        lspan = ''  # <span> opener currently left open (or '')
        line = []   # accumulated HTML pieces of the current line
        for ttype, value in tokensource:
            try:
                cspan = self.span_element_openers[ttype]
            except KeyError:
                # first time we see this token type: build and cache its
                # <span> opener (inline styles or CSS classes)
                title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else ''
                if nocls:
                    css_style = self._get_css_inline_styles(ttype)
                    if css_style:
                        css_style = self.class2style[css_style][0]
                        cspan = '<span style="%s"%s>' % (css_style, title)
                    else:
                        cspan = ''
                else:
                    css_class = self._get_css_classes(ttype)
                    if css_class:
                        cspan = '<span class="%s"%s>' % (css_class, title)
                    else:
                        cspan = ''
                self.span_element_openers[ttype] = cspan

            parts = self._translate_parts(value)

            if tagsfile and ttype in Token.Name:
                # link name tokens to their definition via the ctags file
                filename, linenumber = self._lookup_ctag(value)
                if linenumber:
                    base, filename = os.path.split(filename)
                    if base:
                        base += '/'
                    filename, extension = os.path.splitext(filename)
                    url = self.tagurlformat % {'path': base, 'fname': filename,
                                               'fext': extension}
                    parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                        (url, self.lineanchors, linenumber, parts[0])
                    parts[-1] = parts[-1] + "</a>"

            # for all but the last line
            for part in parts[:-1]:
                if line:
                    # Also check for part being non-empty, so we avoid creating
                    # empty <span> tags
                    if lspan != cspan and part:
                        line.extend(((lspan and '</span>'), cspan, part,
                                     (cspan and '</span>'), lsep))
                    else:  # both are the same, or the current part was empty
                        line.extend((part, (lspan and '</span>'), lsep))
                    yield 1, ''.join(line)
                    line = []
                elif part:
                    yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
                else:
                    yield 1, lsep
            # for the last line
            if line and parts[-1]:
                if lspan != cspan:
                    line.extend(((lspan and '</span>'), cspan, parts[-1]))
                    lspan = cspan
                else:
                    line.append(parts[-1])
            elif parts[-1]:
                line = [cspan, parts[-1]]
                lspan = cspan
            # else we neither have to open a new span nor set lspan

        if line:
            line.extend(((lspan and '</span>'), lsep))
            yield 1, ''.join(line)
+
    def _lookup_ctag(self, token):
        """Look *token* up in the ctags file; return ``(filename, lineno)``
        of its definition, or ``(None, None)`` when not found."""
        entry = ctags.TagEntry()
        if self._ctags.find(entry, token.encode(), 0):
            return entry['file'], entry['lineNumber']
        else:
            return None, None
+
+ def _highlight_lines(self, tokensource):
+ """
+ Highlighted the lines specified in the `hl_lines` option by
+ post-processing the token stream coming from `_format_lines`.
+ """
+ hls = self.hl_lines
+
+ for i, (t, value) in enumerate(tokensource):
+ if t != 1:
+ yield t, value
+ if i + 1 in hls: # i + 1 because Python indexes start at 0
+ if self.noclasses:
+ style = ''
+ if self.style.highlight_color is not None:
+ style = (' style="background-color: %s"' %
+ (self.style.highlight_color,))
+ yield 1, '<span%s>%s</span>' % (style, value)
+ else:
+ yield 1, '<span class="hll">%s</span>' % value
+ else:
+ yield 1, value
+
+ def wrap(self, source):
+ """
+ Wrap the ``source``, which is a generator yielding
+ individual lines, in custom generators. See docstring
+ for `format`. Can be overridden.
+ """
+
+ output = source
+ if self.wrapcode:
+ output = self._wrap_code(output)
+
+ output = self._wrap_pre(output)
+
+ return output
+
    def format_unencoded(self, tokensource, outfile):
        """
        The formatting process uses several nested generators; which of
        them are used is determined by the user's options.

        Each generator should take at least one argument, ``inner``,
        and wrap the pieces of text generated by this.

        Always yield 2-tuples: (code, text). If "code" is 1, the text
        is part of the original tokensource being highlighted, if it's
        0, the text is some piece of wrapping. This makes it possible to
        use several different wrappers that process the original source
        linewise, e.g. line number generators.
        """
        source = self._format_lines(tokensource)

        # As a special case, we wrap line numbers before line highlighting
        # so the line numbers get wrapped in the highlighting tag.
        if not self.nowrap and self.linenos == 2:
            source = self._wrap_inlinelinenos(source)

        if self.hl_lines:
            source = self._highlight_lines(source)

        if not self.nowrap:
            if self.lineanchors:
                source = self._wrap_lineanchors(source)
            if self.linespans:
                source = self._wrap_linespans(source)
            source = self.wrap(source)
            if self.linenos == 1:
                # table line numbers wrap around the whole <pre> output
                source = self._wrap_tablelinenos(source)
            source = self._wrap_div(source)
            if self.full:
                source = self._wrap_full(source, outfile)

        for t, piece in source:
            outfile.write(piece)
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
new file mode 100644
index 0000000..4839d86
--- /dev/null
+++ b/pygments/formatters/img.py
@@ -0,0 +1,645 @@
+"""
+ pygments.formatters.img
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for Pixmap output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import sys
+
+from pygments.formatter import Formatter
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ get_choice_opt
+
+import subprocess
+
+# Import this carefully
+try:
+ from PIL import Image, ImageDraw, ImageFont
+ pil_available = True
+except ImportError:
+ pil_available = False
+
+try:
+ import _winreg
+except ImportError:
+ try:
+ import winreg as _winreg
+ except ImportError:
+ _winreg = None
+
+__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
+ 'BmpImageFormatter']
+
+
# For some unknown reason every font calls it something different
# (maps our canonical style key to the alias names seen in font metadata).
STYLES = {
    'NORMAL':     ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC':     ['Oblique', 'Italic'],
    'BOLD':       ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}

# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
+
+
class PilNotAvailable(ImportError):
    """Raised when the Python Imaging Library (Pillow) is not installed."""
+
+
class FontNotFound(Exception):
    """Raised when none of the specified font names can be located."""
+
+
class FontManager:
    """
    Manages a set of fonts: normal, italic, bold, etc...

    Fonts are located per platform (fontconfig on \\*nix, the font
    directories on macOS, the registry on Windows) and loaded through
    PIL's ``ImageFont``.
    """

    def __init__(self, font_name, font_size=14):
        self.font_name = font_name
        self.font_size = font_size
        self.fonts = {}  # style key ('NORMAL', 'BOLD', ...) -> ImageFont
        self.encoding = None
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        elif sys.platform.startswith('darwin'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_MAC
            self._create_mac()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()

    def _get_nix_font_path(self, name, style):
        """Ask fontconfig which file backs ``<name>:style=<style>``;
        return None when fc-list fails or reports nothing usable."""
        proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
                                stdout=subprocess.PIPE, stderr=None)
        stdout, _ = proc.communicate()
        if proc.returncode == 0:
            lines = stdout.splitlines()
            for line in lines:
                if line.startswith(b'Fontconfig warning:'):
                    continue
                path = line.decode().strip().strip(':')
                if path:
                    return path
            return None

    def _create_nix(self):
        """Load the NORMAL face (required) and the variant faces, falling
        back to BOLD or NORMAL when a variant is missing."""
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _get_mac_font_path(self, font_map, name, style):
        # font_map maps lower-cased font file basenames to full paths
        return font_map.get((name + ' ' + style).strip().lower())

    def _create_mac(self):
        """Scan the standard macOS font directories and load the NORMAL
        face plus variants, with the same fallbacks as on \\*nix."""
        font_map = {}
        for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
                         '/Library/Fonts/', '/System/Library/Fonts/'):
            font_map.update(
                (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
                for f in os.listdir(font_dir)
                if f.lower().endswith(('ttf', 'ttc')))

        for name in STYLES['NORMAL']:
            path = self._get_mac_font_path(font_map, self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_mac_font_path(font_map, self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _lookup_win(self, key, basename, styles, fail=False):
        """Find a font file name in the open registry *key*, trying every
        style alias in *styles* both without and with the ' (TrueType)'
        value-name suffix.

        Returns the registry value, or None; raises FontNotFound instead of
        returning None when *fail* is true.
        """
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except OSError:
                    continue
        # BUG FIX: the original attached a for/else to the *inner* loop, so
        # failure was reported after the first suffix and the ' (TrueType)'
        # variants were never tried; only give up once every suffix/style
        # combination has been exhausted.
        if fail:
            raise FontNotFound('Font %s (%s) not found in registry' %
                               (basename, styles[0]))
        return None

    def _create_win(self):
        """Load fonts from the Windows registry font lists, trying the
        per-user keys before the machine-wide ones."""
        lookuperror = None
        keynames = [
            (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
            (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
            (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
            (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
        ]
        for keyname in keynames:
            try:
                key = _winreg.OpenKey(*keyname)
                try:
                    path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
                    self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                    for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                        path = self._lookup_win(key, self.font_name, STYLES[style])
                        if path:
                            self.fonts[style] = ImageFont.truetype(path, self.font_size)
                        else:
                            if style == 'BOLDITALIC':
                                self.fonts[style] = self.fonts['BOLD']
                            else:
                                self.fonts[style] = self.fonts['NORMAL']
                    return
                except FontNotFound as err:
                    lookuperror = err
                finally:
                    _winreg.CloseKey(key)
            except OSError:
                pass
        # If we get here, we checked all registry keys and had no luck.
        # We can be in one of two situations now:
        # * All key lookups failed. In this case lookuperror is None and we
        #   will raise a generic error
        # * At least one lookup failed with a FontNotFound error. In this
        #   case, we will raise that as a more specific error
        if lookuperror:
            raise lookuperror
        raise FontNotFound('Can\'t open Windows font registry key')

    def get_char_size(self):
        """
        Get the character size (width, height) of a single 'M' in the
        NORMAL font.
        """
        return self.get_text_size('M')

    def get_text_size(self, text):
        """
        Get the text size (width, height) using the NORMAL font.
        """
        font = self.fonts['NORMAL']
        if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0
            return font.getbbox(text)[2:4]
        else:
            return font.getsize(text)

    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']
+
+
class ImageFormatter(Formatter):
    """
    Create a PNG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    .. versionadded:: 0.10

    Additional options accepted:

    `image_format`
        An image format to output to that is recognised by PIL, these include:

        * "PNG" (default)
        * "JPEG"
        * "BMP"
        * "GIF"

    `line_pad`
        The extra spacing (in pixels) between each line of text.

        Default: 2

    `font_name`
        The font name to be used as the base font from which others, such as
        bold and italic fonts will be generated.  This really should be a
        monospace font to look sane.

        Default: "Courier New" on Windows, "Menlo" on Mac OS, and
                 "DejaVu Sans Mono" on \\*nix

    `font_size`
        The font size in points to be used.

        Default: 14

    `image_pad`
        The padding, in pixels to be used at each edge of the resulting image.

        Default: 10

    `line_numbers`
        Whether line numbers should be shown: True/False

        Default: True

    `line_number_start`
        The line number of the first line.

        Default: 1

    `line_number_step`
        The step used when printing line numbers.

        Default: 1

    `line_number_bg`
        The background colour (in "#123456" format) of the line number bar, or
        None to use the style background color.

        Default: "#eed"

    `line_number_fg`
        The text color of the line numbers (in "#123456"-like format).

        Default: "#886"

    `line_number_chars`
        The number of columns of line numbers allowable in the line number
        margin.

        Default: 2

    `line_number_bold`
        Whether line numbers will be bold: True/False

        Default: False

    `line_number_italic`
        Whether line numbers will be italicized: True/False

        Default: False

    `line_number_separator`
        Whether a line will be drawn between the line number area and the
        source code area: True/False

        Default: True

    `line_number_pad`
        The horizontal padding (in pixels) between the line number margin, and
        the source code area.

        Default: 6

    `hl_lines`
        Specify a list of lines to be highlighted.

        .. versionadded:: 1.2

        Default: empty list

    `hl_color`
        Specify the color for highlighting lines.

        .. versionadded:: 1.2

        Default: highlight color of the selected style
    """

    # Required by the pygments mapper
    name = 'img'
    aliases = ['img', 'IMG', 'png']
    filenames = ['*.png']

    unicodeoutput = False

    default_image_format = 'png'

    def __init__(self, **options):
        """
        See the class docstring for explanation of options.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        self.encoding = 'latin1'  # let pygments.format() do the right thing
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        # cached width/height of one character cell (monospace assumed)
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                             'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                             'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                               'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                                  'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                # silently skip non-numeric entries
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        # list of (pos, text, font, text_fg, text_bg) tuples, painted in format()
        self.drawables = []

    def get_style_defs(self, arg=''):
        raise NotImplementedError('The -S option is meaningless for the image '
                                  'formatter. Use -O style=<stylename> instead.')

    def _get_line_height(self):
        """
        Get the height of a line.
        """
        return self.fonth + self.line_pad

    def _get_line_y(self, lineno):
        """
        Get the Y coordinate of a line number.
        """
        return lineno * self._get_line_height() + self.image_pad

    def _get_char_width(self):
        """
        Get the width of a character.
        """
        return self.fontw

    def _get_char_x(self, linelength):
        """
        Get the X coordinate of a character position.
        """
        return linelength + self.image_pad + self.line_number_width

    def _get_text_pos(self, linelength, lineno):
        """
        Get the actual position for a character and line position.
        """
        return self._get_char_x(linelength), self._get_line_y(lineno)

    def _get_linenumber_pos(self, lineno):
        """
        Get the actual position for the start of a line number.
        """
        return (self.image_pad, self._get_line_y(lineno))

    def _get_text_color(self, style):
        """
        Get the correct color for the token from the style.
        """
        if style['color'] is not None:
            fill = '#' + style['color']
        else:
            fill = '#000'
        return fill

    def _get_text_bg_color(self, style):
        """
        Get the correct background color for the token from the style.
        """
        if style['bgcolor'] is not None:
            bg_color = '#' + style['bgcolor']
        else:
            bg_color = None
        return bg_color

    def _get_style_font(self, style):
        """
        Get the correct font for the style.
        """
        return self.fonts.get_font(style['bold'], style['italic'])

    def _get_image_size(self, maxlinelength, maxlineno):
        """
        Get the required image size.
        """
        # NOTE(review): the "+ 0" looks like a leftover; the height reserves
        # room for maxlineno lines plus padding — confirm intent.
        return (self._get_char_x(maxlinelength) + self.image_pad,
                self._get_line_y(maxlineno + 0) + self.image_pad)

    def _draw_linenumber(self, posno, lineno):
        """
        Remember a line number drawable to paint later.
        """
        self._draw_text(
            self._get_linenumber_pos(posno),
            str(lineno).rjust(self.line_number_chars),
            font=self.fonts.get_font(self.line_number_bold,
                                     self.line_number_italic),
            text_fg=self.line_number_fg,
            text_bg=None,
        )

    def _draw_text(self, pos, text, font, text_fg, text_bg):
        """
        Remember a single drawable tuple to paint later.
        """
        self.drawables.append((pos, text, font, text_fg, text_bg))

    def _create_drawables(self, tokensource):
        """
        Create drawables for the token content.

        Walks the token stream, measuring each text fragment, and records
        the maximum line length / char count / line count on ``self`` for
        later image sizing.
        """
        lineno = charno = maxcharno = 0
        maxlinelength = linelength = 0
        for ttype, value in tokensource:
            # walk up the token hierarchy to the nearest styled type
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            # print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(linelength, lineno),
                        temp,
                        font = self._get_style_font(style),
                        text_fg = self._get_text_color(style),
                        text_bg = self._get_text_bg_color(style),
                    )
                    temp_width, _ = self.fonts.get_text_size(temp)
                    linelength += temp_width
                    maxlinelength = max(maxlinelength, linelength)
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    linelength = 0
                    charno = 0
                    lineno += 1
        self.maxlinelength = maxlinelength
        self.maxcharno = maxcharno
        self.maxlineno = lineno

    def _draw_line_numbers(self):
        """
        Create drawables for the line numbers.
        """
        if not self.line_numbers:
            return
        for p in range(self.maxlineno):
            n = p + self.line_number_start
            if (n % self.line_number_step) == 0:
                self._draw_linenumber(p, n)

    def _paint_line_number_bg(self, im):
        """
        Paint the line number background on the image.
        """
        if not self.line_numbers:
            return
        # NOTE(review): this guards on line_number_fg (used for the separator
        # line below) rather than line_number_bg — confirm that is intended.
        if self.line_number_fg is None:
            return
        draw = ImageDraw.Draw(im)
        recth = im.size[-1]
        rectw = self.image_pad + self.line_number_width - self.line_number_pad
        draw.rectangle([(0, 0), (rectw, recth)],
                       fill=self.line_number_bg)
        if self.line_number_separator:
            draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
        del draw

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        This implementation calculates where it should draw each token on the
        pixmap, then calculates the required pixmap size and draws the items.
        """
        self._create_drawables(tokensource)
        self._draw_line_numbers()
        im = Image.new(
            'RGB',
            self._get_image_size(self.maxlinelength, self.maxlineno),
            self.background_color
        )
        self._paint_line_number_bg(im)
        draw = ImageDraw.Draw(im)
        # Highlight
        if self.hl_lines:
            x = self.image_pad + self.line_number_width - self.line_number_pad + 1
            recth = self._get_line_height()
            rectw = im.size[0] - x
            for linenumber in self.hl_lines:
                y = self._get_line_y(linenumber - 1)
                draw.rectangle([(x, y), (x + rectw, y + recth)],
                               fill=self.hl_color)
        for pos, value, font, text_fg, text_bg in self.drawables:
            if text_bg:
                # NOTE(review): ImageDraw.textsize was removed in Pillow 10,
                # so this branch only works on Pillow < 10; consider
                # font.getbbox as in FontManager.get_text_size — confirm the
                # supported Pillow range.
                text_size = draw.textsize(text=value, font=font)
                draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
            draw.text(pos, value, font=font, fill=text_fg)
        im.save(outfile, self.image_format.upper())
+
+
+# Add one formatter per format, so that the "-f gif" option gives the correct result
+# when used in pygmentize.
+
class GifImageFormatter(ImageFormatter):
    """
    Create a GIF image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    Accepts the same options as `ImageFormatter`.

    .. versionadded:: 1.0
    """

    # Only the registered name, aliases and output format differ from the base.
    name = 'img_gif'
    aliases = ['gif']
    filenames = ['*.gif']
    default_image_format = 'gif'
+
+
class JpgImageFormatter(ImageFormatter):
    """
    Create a JPEG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    Accepts the same options as `ImageFormatter`.

    .. versionadded:: 1.0
    """

    # Only the registered name, aliases and output format differ from the base.
    name = 'img_jpg'
    aliases = ['jpg', 'jpeg']
    filenames = ['*.jpg']
    default_image_format = 'jpeg'
+
+
class BmpImageFormatter(ImageFormatter):
    """
    Create a bitmap image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    Accepts the same options as `ImageFormatter`.

    .. versionadded:: 1.0
    """

    # Only the registered name, aliases and output format differ from the base.
    name = 'img_bmp'
    aliases = ['bmp', 'bitmap']
    filenames = ['*.bmp']
    default_image_format = 'bmp'
diff --git a/pygments/formatters/irc.py b/pygments/formatters/irc.py
new file mode 100644
index 0000000..fd5c0d8
--- /dev/null
+++ b/pygments/formatters/irc.py
@@ -0,0 +1,154 @@
+"""
+ pygments.formatters.irc
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for IRC output
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Token, Whitespace
+from pygments.util import get_choice_opt
+
+
+__all__ = ['IRCFormatter']
+
+
#: Map token types to a tuple of color values for light and dark
#: backgrounds.  The first entry is used on light terminals, the second
#: on dark ones (selected by the formatter's ``bg`` option).  Per
#: ircformat() below, a name wrapped in ``*...*`` requests bold and one
#: wrapped in ``_..._`` requests italic; the empty string means "no
#: formatting at all".
IRC_COLORS = {
    Token: ('', ''),

    Whitespace: ('gray', 'brightblack'),
    Comment: ('gray', 'brightblack'),
    Comment.Preproc: ('cyan', 'brightcyan'),
    Keyword: ('blue', 'brightblue'),
    Keyword.Type: ('cyan', 'brightcyan'),
    Operator.Word: ('magenta', 'brightcyan'),
    Name.Builtin: ('cyan', 'brightcyan'),
    Name.Function: ('green', 'brightgreen'),
    Name.Namespace: ('_cyan_', '_brightcyan_'),
    Name.Class: ('_green_', '_brightgreen_'),
    Name.Exception: ('cyan', 'brightcyan'),
    Name.Decorator: ('brightblack', 'gray'),
    Name.Variable: ('red', 'brightred'),
    Name.Constant: ('red', 'brightred'),
    Name.Attribute: ('cyan', 'brightcyan'),
    Name.Tag: ('brightblue', 'brightblue'),
    String: ('yellow', 'yellow'),
    Number: ('blue', 'brightblue'),

    Generic.Deleted: ('brightred', 'brightred'),
    Generic.Inserted: ('green', 'brightgreen'),
    Generic.Heading: ('**', '**'),
    Generic.Subheading: ('*magenta*', '*brightmagenta*'),
    Generic.Error: ('brightred', 'brightred'),

    Error: ('_brightred_', '_brightred_'),
}
+
+
#: Map color names to mIRC palette indices.  Several names share an
#: index on purpose, to stay compatible with the ANSI color names used
#: by the terminal formatters.
IRC_COLOR_MAP = {
    'white': 0,
    'black': 1,
    'blue': 2,
    'brightgreen': 3,
    'brightred': 4,
    'yellow': 5,
    'magenta': 6,
    'orange': 7,
    'green': 7, #compat w/ ansi
    'brightyellow': 8,
    'lightgreen': 9,
    'brightcyan': 9, # compat w/ ansi
    'cyan': 10,
    'lightblue': 11,
    'red': 11, # compat w/ ansi
    'brightblue': 12,
    'brightmagenta': 13,
    'brightblack': 14,
    'gray': 15,
}

def ircformat(color, text):
    """Wrap *text* in mIRC formatting control codes for *color*.

    *color* is a name from IRC_COLOR_MAP, optionally wrapped in
    ``*...*`` (bold, \\x02) and/or ``_..._`` (italic, \\x1D); the empty
    string returns *text* unchanged.
    """
    if len(color) < 1:
        return text
    add = sub = ''
    if '_' in color: # italic
        add += '\x1D'
        sub = '\x1D' + sub
        color = color.strip('_')
    if '*' in color: # bold
        add += '\x02'
        sub = '\x02' + sub
        color = color.strip('*')
    # underline (\x1F) not supported
    # backgrounds (\x03FF,BB) not supported
    if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
        add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
        sub = '\x03' + sub
    # Bug fix: names consisting only of bold/italic markers (e.g. '**'
    # for Generic.Heading) used to fall through to an HTML-like
    # "'<'+add+'>'+text+'</'+sub+'>'" return, emitting the raw control
    # codes as literal angle-bracketed text.  Always close with the
    # collected control codes instead.
    return add + text + sub
+
+
class IRCFormatter(Formatter):
    r"""
    Format tokens with IRC color sequences

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers in the output as well
        (default: ``False`` = no line numbers).
    """
    name = 'IRC'
    aliases = ['irc', 'IRC']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # True on dark backgrounds; used as an index (0/1) into the
        # (lightbg, darkbg) tuples of the colorscheme.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
        self.linenos = options.get('linenos', False)
        # Running line counter for _write_lineno.
        self._lineno = 0

    def _write_lineno(self, outfile):
        # Emit the next "NNNN: " line-number prefix; no-op unless the
        # linenos option is enabled.
        if self.linenos:
            self._lineno += 1
            outfile.write("%04d: " % self._lineno)

    def format_unencoded(self, tokensource, outfile):
        # Number the very first line before any token output.
        self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            while color is None:
                # Token types are tuples of name components: slicing off
                # the last component yields the parent type, which compares
                # equal to the corresponding colorscheme key.  Terminates
                # because the root Token is always in the scheme.
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                # Emit line by line so each physical line gets its own
                # color codes and line number.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ircformat(color, line))
                    outfile.write('\n')
                    self._write_lineno(outfile)
                if spl[-1]:
                    outfile.write(ircformat(color, spl[-1]))
            else:
                # NOTE(review): unstyled tokens are written verbatim, so
                # newlines inside them bypass _write_lineno — line
                # numbering can drift when linenos is enabled; confirm
                # whether that is intended.
                outfile.write(value)
diff --git a/pygments/formatters/latex.py b/pygments/formatters/latex.py
new file mode 100644
index 0000000..d33b686
--- /dev/null
+++ b/pygments/formatters/latex.py
@@ -0,0 +1,521 @@
+"""
+ pygments.formatters.latex
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for LaTeX fancyvrb output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from io import StringIO
+
+from pygments.formatter import Formatter
+from pygments.lexer import Lexer, do_insertions
+from pygments.token import Token, STANDARD_TYPES
+from pygments.util import get_bool_opt, get_int_opt
+
+
+__all__ = ['LatexFormatter']
+
+
def escape_tex(text, commandprefix):
    """Escape all TeX-special characters in *text*.

    Each special character is replaced by a ``\\<commandprefix>Z..{}``
    macro call (the macros themselves come from STYLE_TEMPLATE).
    Backslash and the two braces are routed through control-character
    placeholders first, so the inserted macro calls are not re-escaped
    by the later substitutions.
    """
    substitutions = (
        # stage 1: stash the characters the macro calls are built from
        ('\\', '\x00'),
        ('{', '\x01'),
        ('}', '\x02'),
        # stage 2: expand the placeholders and all remaining specials
        ('\x00', r'\%sZbs{}' % commandprefix),
        ('\x01', r'\%sZob{}' % commandprefix),
        ('\x02', r'\%sZcb{}' % commandprefix),
        ('^', r'\%sZca{}' % commandprefix),
        ('_', r'\%sZus{}' % commandprefix),
        ('&', r'\%sZam{}' % commandprefix),
        ('<', r'\%sZlt{}' % commandprefix),
        ('>', r'\%sZgt{}' % commandprefix),
        ('#', r'\%sZsh{}' % commandprefix),
        ('%', r'\%sZpc{}' % commandprefix),
        ('$', r'\%sZdl{}' % commandprefix),
        ('-', r'\%sZhy{}' % commandprefix),
        ("'", r'\%sZsq{}' % commandprefix),
        ('"', r'\%sZdq{}' % commandprefix),
        ('~', r'\%sZti{}' % commandprefix),
    )
    for old, new in substitutions:
        text = text.replace(old, new)
    return text
+
+
# Skeleton of the standalone LaTeX document emitted when the `full`
# option is set; the %(...)s fields are filled in by
# LatexFormatter.format_unencoded.
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s

%(styledefs)s

\begin{document}

\section*{%(title)s}

%(code)s
\end{document}
'''
+
+## Small explanation of the mess below :)
+#
+# The previous version of the LaTeX formatter just assigned a command to
+# each token type defined in the current style. That obviously is
+# problematic if the highlighted code is produced for a different style
+# than the style commands themselves.
+#
+# This version works much like the HTML formatter which assigns multiple
+# CSS classes to each <span> tag, from the most specific to the least
+# specific token type, thus falling back to the parent token type if one
+# is not defined. Here, the classes are there too and use the same short
+# forms given in token.STANDARD_TYPES.
+#
+# Highlighted code now only uses one custom command, which by default is
+# \PY and selectable by the commandprefix option (and in addition the
+# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
+# backwards compatibility purposes).
+#
+# \PY has two arguments: the classes, separated by +, and the text to
+# render in that style. The classes are resolved into the respective
+# style commands by magic, which serves to ignore unknown classes.
+#
+# The magic macros are:
+# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
+# to render in \PY@do. Their definition determines the style.
+# * \PY@reset resets \PY@it etc. to do nothing.
+# * \PY@toks parses the list of classes, using magic inspired by the
+# keyval package (but modified to use plusses instead of commas
+# because fancyvrb redefines commas inside its environments).
+# * \PY@tok processes one class, calling the \PY@tok@classname command
+# if it exists.
+# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
+# for its class.
+# * \PY resets the style, parses the classnames and then calls \PY@do.
+#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print(STYLE_TEMPLATE % {'cp': 'PY'})
+
# LaTeX macro machinery emitted by LatexFormatter.get_style_defs():
# %(cp)s is the command prefix (default 'PY') and %(styles)s receives
# one \@namedef per styled token class.
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
    \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
    \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
    \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
    \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}

%(styles)s

\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
+
+
def _get_ttype_name(ttype):
    """Return the short style name for *ttype*.

    If the type itself has no entry in STANDARD_TYPES, walk up to the
    closest ancestor that does and append the remaining subtype name
    components to its short name.
    """
    suffix = ''
    short = STANDARD_TYPES.get(ttype)
    while short is None:
        # Prepend the component we are about to strip off.
        suffix = ttype[-1] + suffix
        ttype = ttype.parent
        short = STANDARD_TYPES.get(ttype)
    return short + suffix
+
+
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
            \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
                \PY{k}{pass}
        \end{Verbatim}

    Wrapping can be disabled using the `nowrap` option.

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `nowrap`
        If set to ``True``, don't wrap the tokens at all, not even inside a
        ``\begin{Verbatim}`` environment. This disables most other options
        (default: ``False``).

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        # escapeinside must be exactly two characters (left and right
        # delimiter); anything else disables the feature.
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', 'Verbatim')

        self._create_stylesheet()

    def _create_stylesheet(self):
        # Build two mappings from the selected style:
        #   ttype2name: token type -> short class name used in \PY{...}
        #   cmd2def:    short class name -> TeX definition for that style
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert an 'rrggbb' hex string to the 'r,g,b' float triple
            # used by the LaTeX color package; white when no color given.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            # '$$' is a placeholder for the command prefix, substituted
            # once below.
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # NOTE(review): 'mono' maps to \textsf just like 'sans';
                # \texttt would be the usual monospace choice — confirm
                # whether this is intentional.
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.items():
            styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            # Buffer the highlighted body; it is interpolated into
            # DOC_TEMPLATE at the very end.
            outfile = StringIO()

        if not self.nowrap:
            outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
            if self.linenos:
                start, step = self.linenostart, self.linenostep
                outfile.write(',numbers=left' +
                              (start and ',firstnumber=%d' % start or '') +
                              (step and ',stepnumber=%d' % step or ''))
            if self.mathescape or self.texcomments or self.escapeinside:
                # Restore the usual TeX catcodes for $, ^ and _ inside
                # the verbatim environment so escaped math/TeX works.
                outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
                              '\\catcode`\\_=8\\relax}')
            if self.verboptions:
                outfile.write(',' + self.verboptions)
            outfile.write(']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in range(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Text between the two escape delimiters passes
                    # through unescaped; everything else is TeX-escaped.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                # Unbalanced left delimiter: escape it all.
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                # Token.Escape carries raw LaTeX produced by
                # LatexEmbeddedLexer and is emitted verbatim.
                value = escape_tex(value, cp)
            # Collect the class-name chain from the token type up to the
            # root; unknown types fall back to their standard short name.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Emit line by line so that \PY never spans a newline.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        if not self.nowrap:
            outfile.write('\\end{' + self.envname + '}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existing encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass = self.docclass,
                     preamble = self.preamble,
                     title = self.title,
                     encoding = encoding,
                     styledefs = self.get_style_defs(),
                     code = outfile.getvalue()))
+
+
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        # left/right: the two escape delimiter strings;
        # lang: the wrapped language lexer.
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # find and remove all the escape tokens (replace with an empty string)
        # this is very similar to DelegatingLexer.get_tokens_unprocessed.
        buffered = ''
        insertions = []
        insertion_buf = []
        for i, t, v in self._find_safe_escape_tokens(text):
            if t is None:
                # Plain language text: collect it for re-lexing below.
                if insertion_buf:
                    insertions.append((len(buffered), insertion_buf))
                    insertion_buf = []
                buffered += v
            else:
                # Escape/error token: remember it with its position so it
                # can be re-inserted into the re-lexed stream.
                insertion_buf.append((i, t, v))
        if insertion_buf:
            insertions.append((len(buffered), insertion_buf))
        return do_insertions(insertions,
                             self.lang.get_tokens_unprocessed(buffered))

    def _find_safe_escape_tokens(self, text):
        """ find escape tokens that are not in strings or comments """
        for i, t, v in self._filter_to(
            self.lang.get_tokens_unprocessed(text),
            lambda t: t in Token.Comment or t in Token.String
        ):
            if t is None:
                # Merged run outside comments/strings: scan it for escapes.
                for i2, t2, v2 in self._find_escape_tokens(v):
                    yield i + i2, t2, v2
            else:
                yield i, None, v

    def _filter_to(self, it, pred):
        """ Keep only the tokens that match `pred`, merge the others together """
        buf = ''
        idx = 0
        for i, t, v in it:
            if pred(t):
                # Flush the merged run collected so far, then pass the
                # matching token through unchanged.
                if buf:
                    yield idx, None, buf
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    idx = i  # remember where the merged run starts
                buf += v
        if buf:
            yield idx, None, buf

    def _find_escape_tokens(self, text):
        """ Find escape tokens within text, give token=None otherwise """
        index = 0
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                yield index, None, a
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    # Complete left...right pair: its contents are escape.
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    # Unterminated left delimiter: flag it as an error and
                    # continue scanning the remainder.
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
diff --git a/pygments/formatters/other.py b/pygments/formatters/other.py
new file mode 100644
index 0000000..8c7ee28
--- /dev/null
+++ b/pygments/formatters/other.py
@@ -0,0 +1,161 @@
+"""
+ pygments.formatters.other
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Other formatters: NullFormatter, RawTokenFormatter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+from pygments.util import get_choice_opt
+from pygments.token import Token
+from pygments.console import colorize
+
+__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
+
+
class NullFormatter(Formatter):
    """
    Write the token text to the output stream verbatim, applying no
    markup at all (encoding it first when an encoding is configured).
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        encoding = self.encoding
        for _, text in tokensource:
            outfile.write(text.encode(encoding) if encoding else text)
+
+
class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    :doc:`lexer list <lexers>`.

    Only two options are accepted:

    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color. If
        set but with no value, defaults to ``'red'``.

        .. versionadded:: 0.11

    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']

    unicodeoutput = False

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # We ignore self.encoding if it is set, since it gets set for lexer
        # and formatter if given with -Oencoding on the command line.
        # The RawTokenFormatter outputs only ASCII. Override here.
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            # Fail early on unknown color names instead of mid-stream.
            try:
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)

    def format(self, tokensource, outfile):
        # This formatter writes bytes; verify the stream accepts them.
        try:
            outfile.write(b'')
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)

            write = outfile.write
            flush = outfile.close
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)

            def write(text):
                outfile.write(compressor.compress(text))

            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            write = outfile.write
            flush = outfile.flush

        if self.error_color:
            for ttype, value in tokensource:
                line = b"%r\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    # Bug fix: colorize() concatenates str, but `line` is
                    # bytes; passing it straight through raised TypeError
                    # the first time an error token was highlighted.
                    # bytes %r-formatting is ASCII-safe (PEP 461), so
                    # round-trip through ASCII around the colorizing call.
                    write(colorize(self.error_color,
                                   line.decode('ascii')).encode('ascii'))
                else:
                    write(line)
        else:
            for ttype, value in tokensource:
                write(b"%r\t%r\n" % (ttype, value))
        flush()
+
+
# Source-code skeleton emitted by TestcaseFormatter: the %r in
# TESTCASE_BEFORE receives the raw input fragment; the generated token
# list lines go between the two parts.
TESTCASE_BEFORE = '''\
    def testNeedsName(lexer):
        fragment = %r
        tokens = [
'''
TESTCASE_AFTER = '''\
        ]
        assert list(lexer.get_tokens(fragment)) == tokens
'''
+
+
class TestcaseFormatter(Formatter):
    """
    Write the token stream as source code for a new lexer testcase.

    .. versionadded:: 2.0
    """
    name = 'Testcase'
    aliases = ['testcase']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # The generated snippet is text; only utf-8 (or no encoding at
        # all) keeps it intact.
        if self.encoding not in (None, 'utf-8'):
            raise ValueError("Only None and utf-8 are allowed encodings.")

    def format(self, tokensource, outfile):
        indentation = ' ' * 12
        raw_parts = []
        token_lines = []
        for ttype, value in tokensource:
            raw_parts.append(value)
            token_lines.append('%s(%s, %r),\n' % (indentation, ttype, value))

        pieces = (TESTCASE_BEFORE % (''.join(raw_parts),),
                  ''.join(token_lines),
                  TESTCASE_AFTER)
        if self.encoding is None:
            outfile.write(''.join(pieces))
        else:
            for piece in pieces:
                outfile.write(piece.encode('utf-8'))
        outfile.flush()
diff --git a/pygments/formatters/pangomarkup.py b/pygments/formatters/pangomarkup.py
new file mode 100644
index 0000000..91c1b01
--- /dev/null
+++ b/pygments/formatters/pangomarkup.py
@@ -0,0 +1,83 @@
+"""
+ pygments.formatters.pangomarkup
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for Pango markup output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+
+
+__all__ = ['PangoMarkupFormatter']
+
+
# str.translate table mapping the two characters that are special in
# Pango Markup onto their entity references.
_escape_table = {ord(char): entity
                 for char, entity in (('&', '&amp;'), ('<', '&lt;'))}


def escape_special_chars(text, table=_escape_table):
    """Return *text* with ``&`` and ``<`` replaced by their entities."""
    return text.translate(table)
+
+
class PangoMarkupFormatter(Formatter):
    """
    Format tokens as Pango Markup code. It can then be rendered to an SVG.

    .. versionadded:: 2.9
    """

    name = 'Pango Markup'
    aliases = ['pango', 'pangomarkup']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        # Precompute an (opening-tags, closing-tags) pair for every token
        # type defined by the selected style.
        self.styles = {}

        for token, style in self.style:
            start = ''
            end = ''
            if style['color']:
                start += '<span fgcolor="#%s">' % style['color']
                end = '</span>' + end
            if style['bold']:
                start += '<b>'
                end = '</b>' + end
            if style['italic']:
                start += '<i>'
                end = '</i>' + end
            if style['underline']:
                start += '<u>'
                end = '</u>' + end
            self.styles[token] = (start, end)

    def format_unencoded(self, tokensource, outfile):
        # Buffer consecutive tokens that resolve to the same style so each
        # run is wrapped in a single pair of markup tags.
        lastval = ''
        lasttype = None

        outfile.write('<tt>')

        for ttype, value in tokensource:
            # Fall back to the closest ancestor type that has a style.
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += escape_special_chars(value)
            else:
                if lastval:
                    stylebegin, styleend = self.styles[lasttype]
                    outfile.write(stylebegin + lastval + styleend)
                lastval = escape_special_chars(value)
                lasttype = ttype

        # Flush the final buffered run.
        if lastval:
            stylebegin, styleend = self.styles[lasttype]
            outfile.write(stylebegin + lastval + styleend)

        outfile.write('</tt>')
diff --git a/pygments/formatters/rtf.py b/pygments/formatters/rtf.py
new file mode 100644
index 0000000..ccdfb0a
--- /dev/null
+++ b/pygments/formatters/rtf.py
@@ -0,0 +1,146 @@
+"""
+ pygments.formatters.rtf
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ A formatter that generates RTF files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+from pygments.util import get_int_opt, surrogatepair
+
+
+__all__ = ['RtfFormatter']
+
+
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft(R) Word(R) documents.

    Please note that ``encoding`` and ``outencoding`` options are ignored.
    The RTF format is ASCII natively, but handles unicode characters correctly
    thanks to escape sequences.

    .. versionadded:: 0.6

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.

    `fontsize`
        Size of the font used. Size is specified in half points. The
        default is 24 half-points, giving a size 12 font.

        .. versionadded:: 2.0
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']

    def __init__(self, **options):
        r"""
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...

        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''
        self.fontsize = get_int_opt(options, 'fontsize', 0)

    def _escape(self, text):
        # Escape the three characters that are special in RTF source.
        return text.replace('\\', '\\\\') \
                   .replace('{', '\\{') \
                   .replace('}', '\\}')

    def _escape_text(self, text):
        # empty strings, should give a small performance improvement
        if not text:
            return ''

        # escape text
        text = self._escape(text)

        buf = []
        for c in text:
            cn = ord(c)
            if cn < (2**7):
                # ASCII character
                buf.append(str(c))
            elif (2**7) <= cn < (2**16):
                # single unicode escape sequence
                buf.append('{\\u%d}' % cn)
            elif (2**16) <= cn:
                # RTF limits unicode to 16 bits.
                # Force surrogate pairs
                buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))

        return ''.join(buf).replace('\n', '\\par\n')

    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header
        outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
                      '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
                      '{\\colortbl;' % (self.fontface and
                                        ' ' + self._escape(self.fontface) or
                                        ''))

        # convert colors and save them in a mapping to access them later.
        # Index 0 of the RTF color table is the implicit default color,
        # so mapped colors start at offset 1.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    outfile.write('\\red%d\\green%d\\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write('}\\f0 ')
        if self.fontsize:
            outfile.write('\\fs%d' % self.fontsize)

        # highlight stream
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled type is found.
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append('\\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append('\\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append('\\b')
            if style['italic']:
                buf.append('\\i')
            if style['underline']:
                buf.append('\\ul')
            if style['border']:
                buf.append('\\chbrdr\\chcfpat%d' %
                           color_mapping[style['border']])
            start = ''.join(buf)
            # Wrap the token in a group only when it carries any style.
            if start:
                outfile.write('{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write('}')

        outfile.write('}')
diff --git a/pygments/formatters/svg.py b/pygments/formatters/svg.py
new file mode 100644
index 0000000..32d40cb
--- /dev/null
+++ b/pygments/formatters/svg.py
@@ -0,0 +1,188 @@
+"""
+ pygments.formatters.svg
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for SVG output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+from pygments.token import Comment
+from pygments.util import get_bool_opt, get_int_opt
+
+__all__ = ['SvgFormatter']
+
+
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    # '&' must be replaced first so already-produced entities are not mangled.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
+
+
+class2style = {}
+
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file. This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype. If true, the `fontfamily`
        and `fontsize` options are ignored. Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `linenos`
        If ``True``, add line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `linenowidth`
        Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
        for up to 4-digit line numbers. Increase width for longer code blocks).

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else. (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces. SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is. However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        # Derive a pixel count from the font size to compute default vertical
        # offsets (assumes `fontsize` is a string -- TODO confirm).
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            # Font size not given in integer pixels (e.g. '1.2em'); fall back
            # to a reasonable default for the offsets below.  (Was a bare
            # ``except:``; only ValueError can escape int() on a str here.)
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = get_int_opt(options, 'linenostart', 1)
        self.linenostep = get_int_opt(options, 'linenostep', 1)
        self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep)
        # Cache mapping token types to their computed tspan attribute string.
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))

        counter = self.linenostart
        counter_step = self.linenostep
        # Line numbers reuse the Comment style so they blend into the output.
        counter_style = self._get_style(Comment)
        line_x = x

        if self.linenos:
            if counter % counter_step == 0:
                outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
                              (x + self.linenowidth, y, counter_style, counter))
            # Shift code to the right of the line-number gutter.
            line_x += self.linenowidth + self.ystep
            counter += 1

        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            # Every newline closes the current <text> line and opens the next
            # one at the following y coordinate.
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n')
                if self.linenos and counter % counter_step == 0:
                    outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
                                  (x + self.linenowidth, y, counter_style, counter))
                counter += 1
                outfile.write('<text x="%s" y="%s" xml:space="preserve">' %
                              (line_x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        """Return (and cache) the tspan attribute string for ``tokentype``."""
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        # Walk up the token hierarchy until the style defines this type.
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
diff --git a/pygments/formatters/terminal.py b/pygments/formatters/terminal.py
new file mode 100644
index 0000000..23d3a71
--- /dev/null
+++ b/pygments/formatters/terminal.py
@@ -0,0 +1,127 @@
+"""
+ pygments.formatters.terminal
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for terminal output with ANSI sequences.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.formatter import Formatter
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Token, Whitespace
+from pygments.console import ansiformat
+from pygments.util import get_choice_opt
+
+
+__all__ = ['TerminalFormatter']
+
+
+#: Map token types to a tuple of color values for light and dark
+#: backgrounds.
#: Map token types to a tuple of color values for light and dark
#: backgrounds.  Consumed by `TerminalFormatter._get_color`, which walks a
#: token type's parents, so the base ``Token`` entry is the final fallback.
#: The decorated forms (``_x_``, ``*x*``, ``**``) are presumably ansiformat
#: markup for underline/italic/bold -- verify against pygments.console.
TERMINAL_COLORS = {
    Token: ('', ''),

    Whitespace: ('gray', 'brightblack'),
    Comment: ('gray', 'brightblack'),
    Comment.Preproc: ('cyan', 'brightcyan'),
    Keyword: ('blue', 'brightblue'),
    Keyword.Type: ('cyan', 'brightcyan'),
    Operator.Word: ('magenta', 'brightmagenta'),
    Name.Builtin: ('cyan', 'brightcyan'),
    Name.Function: ('green', 'brightgreen'),
    Name.Namespace: ('_cyan_', '_brightcyan_'),
    Name.Class: ('_green_', '_brightgreen_'),
    Name.Exception: ('cyan', 'brightcyan'),
    Name.Decorator: ('brightblack', 'gray'),
    Name.Variable: ('red', 'brightred'),
    Name.Constant: ('red', 'brightred'),
    Name.Attribute: ('cyan', 'brightcyan'),
    Name.Tag: ('brightblue', 'brightblue'),
    String: ('yellow', 'yellow'),
    Number: ('blue', 'brightblue'),

    Generic.Deleted: ('brightred', 'brightred'),
    Generic.Inserted: ('green', 'brightgreen'),
    Generic.Heading: ('**', '**'),
    Generic.Subheading: ('*magenta*', '*brightmagenta*'),
    Generic.Prompt: ('**', '**'),
    Generic.Error: ('brightred', 'brightred'),

    Error: ('_brightred_', '_brightred_'),
}
+
+
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def format(self, tokensource, outfile):
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        """Write the next line number; a newline precedes all but the first."""
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def _get_color(self, ttype):
        # self.colorscheme is a dict containing usually generic types, so we
        # have to walk the tree of dots.  The base Token type is expected to
        # be a key, even if mapped to empty strings, as in the default above.
        colors = self.colorscheme.get(ttype)
        while colors is None and ttype is not None:
            ttype = ttype.parent
            colors = self.colorscheme.get(ttype)
        if colors is None:
            # Defensive fallback: a custom colorscheme that lacks the base
            # ``Token`` entry used to crash here (AttributeError on
            # ``None.parent``); render uncolored instead.
            return ''
        return colors[self.darkbg]

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self._get_color(ttype)

            # Emit line by line so color sequences never span a newline.
            for line in value.splitlines(True):
                if color:
                    outfile.write(ansiformat(color, line.rstrip('\n')))
                else:
                    outfile.write(line.rstrip('\n'))
                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                    else:
                        outfile.write('\n')

        if self.linenos:
            outfile.write("\n")
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
new file mode 100644
index 0000000..addba42
--- /dev/null
+++ b/pygments/formatters/terminal256.py
@@ -0,0 +1,338 @@
+"""
+ pygments.formatters.terminal256
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for 256-color terminal output with ANSI sequences.
+
+ RGB-to-XTERM color conversion routines adapted from xterm256-conv
+ tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
+ by Wolfgang Frisch.
+
+ Formatter version 1.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# TODO:
+# - Options to map style's bold/underline/italic/border attributes
+# to some ANSI attrbutes (something like 'italic=underline')
+# - An option to output "style RGB to xterm RGB/index" conversion table
+# - An option to indicate that we are running in "reverse background"
+# xterm. This means that default colors are white-on-black, not
+# black-on-while, so colors like "white background" need to be converted
+# to "white background, black foreground", etc...
+
+from pygments.formatter import Formatter
+from pygments.console import codes
+from pygments.style import ansicolors
+
+
+__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
+
+
class EscapeSequence:
    """
    Accumulates foreground/background colors plus font attribute flags and
    renders them as ANSI SGR escape strings.
    """

    def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
        self.fg = fg
        self.bg = bg
        self.bold = bold
        self.underline = underline
        self.italic = italic

    def escape(self, attrs):
        # An empty attribute list renders as no escape sequence at all.
        return "\x1b[" + ";".join(attrs) + "m" if attrs else ""

    def _attribute_codes(self):
        # SGR codes for the font attribute flags, in a fixed order.
        flags = []
        if self.bold:
            flags.append("01")
        if self.underline:
            flags.append("04")
        if self.italic:
            flags.append("03")
        return flags

    def color_string(self):
        """Return the 256-color / named-color SGR escape for this sequence."""
        attrs = []
        if self.fg is not None:
            if self.fg in ansicolors:
                esc = codes[self.fg.replace('ansi', '')]
                if ';01m' in esc:
                    # the named color is rendered bold by the console codes
                    self.bold = True
                # keep only the two-digit foreground code
                attrs.append(esc[2:4])
            else:
                attrs += ["38", "5", "%i" % self.fg]
        if self.bg is not None:
            if self.bg in ansicolors:
                esc = codes[self.bg.replace('ansi', '')]
                # background code is the foreground code shifted by 10
                attrs.append(str(int(esc[2:4]) + 10))
            else:
                attrs += ["48", "5", "%i" % self.bg]
        return self.escape(attrs + self._attribute_codes())

    def true_color_string(self):
        """Return the 24-bit (RGB) SGR escape for this sequence."""
        attrs = []
        if self.fg:
            attrs += ["38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])]
        if self.bg:
            attrs += ["48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])]
        return self.escape(attrs + self._attribute_codes())

    def reset_string(self):
        """Return the SGR escape that undoes everything this sequence set."""
        attrs = []
        if self.fg is not None:
            attrs.append("39")  # default foreground
        if self.bg is not None:
            attrs.append("49")  # default background
        if self.bold or self.underline or self.italic:
            attrs.append("00")  # full attribute reset
        return self.escape(attrs)
+
+
class Terminal256Formatter(Formatter):
    """
    Format tokens with ANSI color sequences, for output in a 256-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    The formatter takes colors from a style defined by the `style` option
    and converts them to nearest ANSI 256-color escape sequences. Bold and
    underline attributes from the style are preserved (and displayed).

    .. versionadded:: 0.9

    .. versionchanged:: 2.2
       If the used style defines foreground colors in the form ``#ansi*``, then
       `Terminal256Formatter` will map these to non extended foreground color.
       See :ref:`AnsiTerminalStyle` for more information.

    .. versionchanged:: 2.4
       The ANSI color names have been updated with names that are easier to
       understand and align with colornames of other projects and terminals.
       See :ref:`this table <new-ansi-color-names>` for more information.


    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.xterm_colors = []   # index -> (r, g, b) of the 256-color table
        self.best_match = {}     # style color -> chosen escape index (cache)
        self.style_string = {}   # str(tokentype) -> (start, reset) escapes

        self.usebold = 'nobold' not in options
        self.useunderline = 'nounderline' not in options
        self.useitalic = 'noitalic' not in options

        self._build_color_table()  # build an RGB-to-256 color conversion table
        self._setup_styles()  # convert selected style's colors to term. colors

        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def _build_color_table(self):
        # colors 0..15: 16 basic colors

        self.xterm_colors.append((0x00, 0x00, 0x00))  # 0
        self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1
        self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2
        self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3
        self.xterm_colors.append((0x00, 0x00, 0xee))  # 4
        self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5
        self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6
        self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7
        self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8
        self.xterm_colors.append((0xff, 0x00, 0x00))  # 9
        self.xterm_colors.append((0x00, 0xff, 0x00))  # 10
        self.xterm_colors.append((0xff, 0xff, 0x00))  # 11
        self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12
        self.xterm_colors.append((0xff, 0x00, 0xff))  # 13
        self.xterm_colors.append((0x00, 0xff, 0xff))  # 14
        self.xterm_colors.append((0xff, 0xff, 0xff))  # 15

        # colors 16..232: the 6x6x6 color cube

        valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

        # NOTE(review): the extra iteration (217 instead of the cube's 216)
        # fills index 232 with a duplicate (0, 0, 0), which keeps the
        # grayscale entries below on their correct xterm indices 233..253;
        # xterm's own entry 232 (#080808) and the two lightest grays
        # (254, 255) are not represented.  Preserved as-is, since changing it
        # would alter the emitted color indices.
        for i in range(217):
            r = valuerange[(i // 36) % 6]
            g = valuerange[(i // 6) % 6]
            b = valuerange[i % 6]
            self.xterm_colors.append((r, g, b))

        # colors 233..253: grayscale

        for i in range(1, 22):
            v = 8 + i * 10
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        """Return the table index nearest (r, g, b) by squared RGB distance."""
        distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff)
        match = 0

        # linear scan over all 254 table entries built above
        for i in range(0, 254):
            values = self.xterm_colors[i]

            rd = r - values[0]
            gd = g - values[1]
            bd = b - values[2]
            d = rd*rd + gd*gd + bd*bd

            if d < distance:
                match = i
                distance = d
        return match

    def _color_index(self, color):
        """Map a style color to an escape "index": a table index for hex
        colors, or the color name itself for named ANSI colors."""
        index = self.best_match.get(color, None)
        if color in ansicolors:
            # named ANSI colors pass through unchanged; EscapeSequence
            # resolves them to console codes itself
            index = color
            self.best_match[color] = index
        if index is None:
            try:
                rgb = int(str(color), 16)
            except ValueError:
                # unparsable color: quantize black rather than failing
                rgb = 0

            r = (rgb >> 16) & 0xff
            g = (rgb >> 8) & 0xff
            b = rgb & 0xff
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        # Precompute the (start, reset) escape pair for every token type the
        # style defines.
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            # get foreground from ansicolor if set
            if ndef['ansicolor']:
                escape.fg = self._color_index(ndef['ansicolor'])
            elif ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgansicolor']:
                escape.bg = self._color_index(ndef['bgansicolor'])
            elif ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            if self.useitalic and ndef['italic']:
                escape.italic = True
            self.style_string[str(ttype)] = (escape.color_string(),
                                             escape.reset_string())

    def _write_lineno(self, outfile):
        # emit "NNNN: ", preceded by a newline for every line but the first
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def format(self, tokensource, outfile):
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            not_found = True
            # walk up the token hierarchy until a precomputed style matches
            while ttype and not_found:
                try:
                    on, off = self.style_string[str(ttype)]

                    # Like TerminalFormatter, add "reset colors" escape sequence
                    # on newline.
                    spl = value.split('\n')
                    for line in spl[:-1]:
                        if line:
                            outfile.write(on + line + off)
                        if self.linenos:
                            self._write_lineno(outfile)
                        else:
                            outfile.write('\n')

                    if spl[-1]:
                        outfile.write(on + spl[-1] + off)

                    not_found = False

                except KeyError:
                    ttype = ttype.parent

            if not_found:
                # no style information for any ancestor -- emit unstyled
                outfile.write(value)

        if self.linenos:
            outfile.write("\n")
+
+
+
class TerminalTrueColorFormatter(Terminal256Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a true-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    .. versionadded:: 2.1

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'TerminalTrueColor'
    aliases = ['terminal16m', 'console16m', '16m']
    filenames = []

    def _build_color_table(self):
        # True-color output needs no 256-color quantization table.
        pass

    def _color_tuple(self, color):
        """Parse an ``RRGGBB`` hex string into an (r, g, b) tuple, or None."""
        try:
            rgb = int(str(color), 16)
        except ValueError:
            return None
        return ((rgb >> 16) & 0xff, (rgb >> 8) & 0xff, rgb & 0xff)

    def _setup_styles(self):
        # Precompute 24-bit (start, reset) escape pairs per token type.
        for ttype, ndef in self.style:
            escape = EscapeSequence(
                fg=self._color_tuple(ndef['color']) if ndef['color'] else None,
                bg=self._color_tuple(ndef['bgcolor']) if ndef['bgcolor'] else None,
                bold=self.usebold and ndef['bold'],
                underline=self.useunderline and ndef['underline'],
                italic=self.useitalic and ndef['italic'],
            )
            self.style_string[str(ttype)] = (escape.true_color_string(),
                                             escape.reset_string())
diff --git a/pygments/lexer.py b/pygments/lexer.py
new file mode 100644
index 0000000..7cbec96
--- /dev/null
+++ b/pygments/lexer.py
@@ -0,0 +1,883 @@
+"""
+ pygments.lexer
+ ~~~~~~~~~~~~~~
+
+ Base lexer classes.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import sys
+import time
+
+from pygments.filter import apply_filters, Filter
+from pygments.filters import get_filter_by_name
+from pygments.token import Error, Text, Other, Whitespace, _TokenType
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ make_analysator, Future, guess_decode
+from pygments.regexopt import regex_opt
+
+__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
+ 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
+ 'default', 'words', 'line_re']
+
# Matches a single line up to and including its newline; used for
# line-wise lexing.
line_re = re.compile('.*?\n')

# BOM -> codec name, checked in order; the four-byte UTF-32 marks come
# before the two-byte UTF-16 ones so they are not mistaken for a prefix.
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

# Default analyse_text implementation: never claims a text.
_default_analyse = staticmethod(lambda x: 0.0)
+
+
class LexerMeta(type):
    """
    Metaclass for lexers: when a class body defines ``analyse_text``, wrap
    it with ``make_analysator`` so it acts as a static method whose return
    value is always a float (see ``Lexer.analyse_text``).
    """

    def __new__(mcs, name, bases, namespace):
        if 'analyse_text' in namespace:
            namespace['analyse_text'] = make_analysator(namespace['analyse_text'])
        return type.__new__(mcs, name, bases, namespace)
+
+
class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    Basic options recognized:
    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection. Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Name of the lexer
    name = None

    #: URL of the language specification/definition
    url = None

    #: Shortcuts for the lexer
    aliases = []

    #: File name globs
    filenames = []

    #: Secondary file name globs
    alias_filenames = []

    #: MIME types
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    def __init__(self, **options):
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        # 'inencoding' takes precedence over 'encoding' when both are given
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.

        ``filter_`` may be a `Filter` instance or a filter name, which is
        resolved via `get_filter_by_name` with ``options`` forwarded.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        Has to return a float between ``0`` and ``1`` that indicates
        if a lexer wants to highlight this text. Used by ``guess_lexer``.
        If this method returns ``0`` it won't highlight it in any case, if
        it returns ``1`` highlighting with this lexer is guaranteed.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return values was ``0.0``.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        if not isinstance(text, str):
            # bytes input: decode according to the 'encoding' option
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                # drop a decoded byte-order mark, if any
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            # already a str: only strip a leading BOM character
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            # drop the index component of get_tokens_unprocessed's triples
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        Return an iterable of (index, tokentype, value) pairs where "index"
        is the starting position of the token within the input text.

        In subclasses, implement this method as a generator to
        maximize effectiveness.
        """
        raise NotImplementedError
+
+
class DelegatingLexer(Lexer):
    """
    Combines two lexers: the language lexer runs over the whole input
    first, and every span it reports as ``_needle`` (``Other`` by default)
    is then re-lexed by the root lexer, with the remaining language tokens
    re-inserted at their positions.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Gather the needle text (destined for the root lexer) while
        # recording, for every other token, the offset in that text where it
        # must be re-inserted.
        parts = []        # pieces of needle text collected so far
        length = 0        # total length of ``parts``
        insertions = []
        pending = []      # language-lexer tokens awaiting insertion
        for index, ttype, value in self.language_lexer.get_tokens_unprocessed(text):
            if ttype is self.needle:
                if pending:
                    insertions.append((length, pending))
                    pending = []
                parts.append(value)
                length += len(value)
            else:
                pending.append((index, ttype, value))
        if pending:
            insertions.append((length, pending))
        return do_insertions(
            insertions, self.root_lexer.get_tokens_unprocessed(''.join(parts)))
+
+
+# ------------------------------------------------------------------------------
+# RegexLexer and ExtendedRegexLexer
+#
+
+
class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.

    Subclasses ``str`` so the value itself is the name of the state to
    include; the subclass only acts as a marker type (see
    ``RegexLexerMeta._process_state``).
    """
    pass
+
+
class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

# Singleton marker placed inside token lists of subclassed lexers.
inherit = _inherit()  # pylint: disable=invalid-name
+
+
class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.

    The tuple contents are the names of the states to combine (see
    ``RegexLexerMeta._process_new_state``).
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass
+
+
+class _PseudoMatch:
+ """
+ A pseudo match object constructed from a string.
+ """
+
+ def __init__(self, start, text):
+ self._text = text
+ self._start = start
+
+ def start(self, arg=None):
+ return self._start
+
+ def end(self, arg=None):
+ return self._start + len(self._text)
+
+ def group(self, arg=None):
+ if arg:
+ raise IndexError('No such group')
+ return self._text
+
+ def groups(self):
+ return (self._text,)
+
+ def groupdict(self):
+ return {}
+
+
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.

    ``args`` holds one entry per regex group: a plain token type, another
    callback to delegate to, or ``None`` to skip that group entirely.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        # let the sub-callback observe the correct position
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            # advance past the whole match once all groups are handled
            ctx.pos = match.end()
    return callback
+
+
class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

# The singleton instance passed as ``using(this)``.
this = _This()
+
+
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    # 'state' is forwarded to get_tokens_unprocessed, not the constructor.
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                # NOTE(review): update() lets lexer.options override the
                # explicit keyword arguments and mutates the shared ``kwargs``
                # dict on every call -- confirm this is intended.
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            # re-offset the delegate lexer's indices into the outer text
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            # NOTE(review): see the note in the branch above about the
            # kwargs.update semantics.
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
+
+
class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop')
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        # any state transition accepted by RegexLexerMeta._process_new_state
        self.state = state
+
+
class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words      # iterable of literal strings
        self.prefix = prefix    # regex source prepended to the alternation
        self.suffix = suffix    # regex source appended to the alternation

    def get(self):
        # Build the optimized alternation lazily, when the rule is compiled.
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
+
+
+class RegexLexerMeta(LexerMeta):
+ """
+ Metaclass for RegexLexer, creates the self._tokens attribute from
+ self.tokens on the first instantiation.
+ """
+
    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        # ``words(...)`` and other Future instances are resolved lazily here.
        if isinstance(regex, Future):
            regex = regex.get()
        # Return the bound ``match`` method so rules can be applied directly.
        return re.compile(regex, rflags).match
+
    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        # a rule's action is either a plain token type or a callback
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token
+
    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                # '#pop:n' pops n states at once
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            # register the merged rules under a synthetic state name
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state
+
+ def _process_state(cls, unprocessed, processed, state):
+ """Preprocess a single state definition."""
+ assert type(state) is str, "wrong state name %r" % state
+ assert state[0] != '#', "invalid state name %r" % state
+ if state in processed:
+ return processed[state]
+ tokens = processed[state] = []
+ rflags = cls.flags
+ for tdef in unprocessed[state]:
+ if isinstance(tdef, include):
+ # it's a state reference
+ assert tdef != state, "circular state reference %r" % state
+ tokens.extend(cls._process_state(unprocessed, processed,
+ str(tdef)))
+ continue
+ if isinstance(tdef, _inherit):
+ # should be processed already, but may not in the case of:
+ # 1. the state has no counterpart in any parent
+ # 2. the state includes more than one 'inherit'
+ continue
+ if isinstance(tdef, default):
+ new_state = cls._process_new_state(tdef.state, unprocessed, processed)
+ tokens.append((re.compile('').match, None, new_state))
+ continue
+
+ assert type(tdef) is tuple, "wrong rule def %r" % tdef
+
+ try:
+ rex = cls._process_regex(tdef[0], rflags, state)
+ except Exception as err:
+ raise ValueError("uncompilable regex %r in state %r of %r: %s" %
+ (tdef[0], state, cls, err)) from err
+
+ token = cls._process_token(tdef[1])
+
+ if len(tdef) == 2:
+ new_state = None
+ else:
+ new_state = cls._process_new_state(tdef[2],
+ unprocessed, processed)
+
+ tokens.append((rex, token, new_state))
+ return tokens
+
+ def process_tokendef(cls, name, tokendefs=None):
+ """Preprocess a dictionary of token definitions."""
+ processed = cls._all_tokens[name] = {}
+ tokendefs = tokendefs or cls.tokens[name]
+ for state in list(tokendefs):
+ cls._process_state(tokendefs, processed, state)
+ return processed
+
+ def get_tokendefs(cls):
+ """
+ Merge tokens from superclasses in MRO order, returning a single tokendef
+ dictionary.
+
+ Any state that is not defined by a subclass will be inherited
+ automatically. States that *are* defined by subclasses will, by
+ default, override that state in the superclass. If a subclass wishes to
+ inherit definitions from a superclass, it can use the special value
+ "inherit", which will cause the superclass' state definition to be
+ included at that point in the state.
+ """
+ tokens = {}
+ inheritable = {}
+ for c in cls.__mro__:
+ toks = c.__dict__.get('tokens', {})
+
+ for state, items in toks.items():
+ curitems = tokens.get(state)
+ if curitems is None:
+ # N.b. because this is assigned by reference, sufficiently
+ # deep hierarchies are processed incrementally (e.g. for
+ # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
+ # will not see any inherits in B).
+ tokens[state] = items
+ try:
+ inherit_ndx = items.index(inherit)
+ except ValueError:
+ continue
+ inheritable[state] = inherit_ndx
+ continue
+
+ inherit_ndx = inheritable.pop(state, None)
+ if inherit_ndx is None:
+ continue
+
+ # Replace the "inherit" value with the items
+ curitems[inherit_ndx:inherit_ndx+1] = items
+ try:
+ # N.b. this is the index in items (that is, the superclass
+ # copy), so offset required when storing below.
+ new_inh_ndx = items.index(inherit)
+ except ValueError:
+ pass
+ else:
+ inheritable[state] = inherit_ndx + new_inh_ndx
+
+ return tokens
+
+ def __call__(cls, *args, **kwds):
+ """Instantiate cls after preprocessing its token definitions."""
+ if '_tokens' not in cls.__dict__:
+ cls._all_tokens = {}
+ cls._tmpname = 0
+ if hasattr(cls, 'token_variants') and cls.token_variants:
+ # don't process yet
+ pass
+ else:
+ cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
+
+ return type.__call__(cls, *args, **kwds)
+
+
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all time there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensure
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)

        Yields ``(index, tokentype, value)`` triples, where ``index`` is
        the character offset of ``value`` in ``text``.
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            # Try the current state's rules in order; first match wins.
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    # ``action`` is None for ``default()`` rules: transition
                    # only, no token emitted.
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            # callback (e.g. bygroups/using) yields triples
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    # No rule matched: emit one Error character and move on.
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # pos ran past the end of text -- we are done.
                    break
+
+
class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        # The input being lexed and the current offset into it.
        self.text = text
        self.pos = pos
        # A falsy ``end`` falls back to the text length (end=0 not supported).
        self.end = len(text) if not end else end
        # A falsy ``stack`` starts a fresh stack at the root state.
        self.stack = ['root'] if not stack else stack

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)
+
+
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.

        Exactly one of ``text`` / ``context`` must be provided; when a
        context is given, its ``text`` attribute is used.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                # Unlike RegexLexer, matching is bounded by ctx.end.
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            # Callbacks receive the context and may mutate it.
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                            # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # No rule matched at ctx.pos.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        # NOTE(review): this yields Text for the newline while
                        # RegexLexer yields Whitespace -- intentional? confirm.
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
+
+
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.  Each
    ``itokens`` iterable is spliced into the token stream ``tokens``
    at character position ``index``.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    pending = iter(insertions)
    try:
        ins_index, ins_tokens = next(pending)
    except StopIteration:
        # Nothing to insert -- pass the stream through untouched.
        yield from tokens
        return

    out_pos = None     # output character position; seeded from the first token
    have_more = True   # at least one insertion still waiting to be emitted?

    for idx, ttype, value in tokens:
        if out_pos is None:
            out_pos = idx
        consumed = 0   # how much of ``value`` has already been emitted
        while have_more and idx + len(value) >= ins_index:
            # Emit the slice of the token preceding the insertion point.
            head = value[consumed:ins_index - idx]
            if head:
                yield out_pos, ttype, head
                out_pos += len(head)
            # Emit the inserted tokens at the current output position.
            for _, ins_type, ins_value in ins_tokens:
                yield out_pos, ins_type, ins_value
                out_pos += len(ins_value)
            consumed = ins_index - idx
            try:
                ins_index, ins_tokens = next(pending)
            except StopIteration:
                have_more = False
                break  # not strictly necessary
        # Whatever remains of the token after the last insertion.
        if consumed < len(value):
            yield out_pos, ttype, value[consumed:]
            out_pos += len(value) - consumed

    # Insertions that fall past the end of the token stream.
    while have_more:
        # no normal tokens were seen, so start counting at zero
        out_pos = out_pos or 0
        for _, ins_type, ins_value in ins_tokens:
            yield out_pos, ins_type, ins_value
            out_pos += len(ins_value)
        try:
            ins_index, ins_tokens = next(pending)
        except StopIteration:
            have_more = False
            break  # not strictly necessary
+
+
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        # Resolve ``words()`` specifiers the same way the base class would.
        if isinstance(regex, words):
            raw = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            raw = regex
        compiled = re.compile(raw, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            # One [call-count, total-seconds] record per (state, regex) pair,
            # kept in the innermost profiling frame.
            info = cls._prof_data[-1].setdefault((state, raw), [0, 0.0])
            started = time.time()
            res = compiled.match(text, pos, endpos)
            elapsed = time.time() - started
            info[0] += 1
            info[1] += elapsed
            return res

        return match_func
+
+
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    # Stack of per-run profiling dicts; the metaclass' match functions write
    # into the last (innermost) entry.
    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Tokenize like RegexLexer, then print a profiling table to stdout."""
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        # Rows: (state, truncated regex repr, ncalls, total ms, ms per call),
        # sorted by the column chosen via _prof_sort_index.
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
new file mode 100644
index 0000000..83be0e4
--- /dev/null
+++ b/pygments/lexers/__init__.py
@@ -0,0 +1,334 @@
+"""
+ pygments.lexers
+ ~~~~~~~~~~~~~~~
+
+ Pygments lexers.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import types
+from fnmatch import fnmatch
+from os.path import basename
+
+from pygments.lexers._mapping import LEXERS
+from pygments.modeline import get_filetype_from_buffer
+from pygments.plugin import find_plugin_lexers
+from pygments.util import ClassNotFound, guess_decode
+
# Deprecated lexer names mapped to their current replacements; resolved
# lazily by the module-level __getattr__ machinery at the bottom of this file.
COMPAT = {
    'Python3Lexer': 'PythonLexer',
    'Python3TracebackLexer': 'PythonTracebackLexer',
}

# Public API: the lookup helpers plus every builtin lexer name (including
# the deprecated aliases above).
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)

# Cache mapping lexer name -> class, filled by _load_lexers().
_lexer_cache = {}
+
def _load_lexers(module_name):
    """Import *module_name* and register every lexer it exports.

    All classes listed in the module's ``__all__`` are added to the
    module-level ``_lexer_cache`` keyed by their ``name`` attribute.
    """
    module = __import__(module_name, None, None, ['__all__'])
    for export in module.__all__:
        lexer_class = getattr(module, export)
        _lexer_cache[lexer_class.name] = lexer_class
+
+
def get_all_lexers(plugins=True):
    """Yield ``(name, aliases, filenames, mimetypes)`` tuples describing
    every known lexer.

    If *plugins* is true (the default), lexers supplied by entrypoints are
    included as well; otherwise only builtin ones are reported.
    """
    # Builtin lexers: drop the leading module-name field from each record.
    for record in LEXERS.values():
        yield record[1:]
    if plugins:
        for plugin in find_plugin_lexers():
            yield plugin.name, plugin.aliases, plugin.filenames, plugin.mimetypes
+
+
def find_lexer_class(name):
    """Look up a lexer class by its *name* attribute.

    Return None if not found.
    """
    # Fast path: a previous lookup already loaded it.
    if name in _lexer_cache:
        return _lexer_cache[name]
    # Scan the builtin lexer table.
    for module_name, lexer_name, aliases, _, _ in LEXERS.values():
        if lexer_name == name:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # Fall back to lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if plugin.name == name:
            return plugin
+
+
def find_lexer_class_by_name(_alias):
    """Look up a lexer class by one of its aliases.

    Like `get_lexer_by_name`, but does not instantiate the class.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    wanted = _alias.lower()
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for plugin in find_plugin_lexers():
        if wanted in plugin.aliases:
            return plugin
    raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
def get_lexer_by_name(_alias, **options):
    """Return an instance of the lexer registered under alias *_alias*,
    constructed with *options*.

    Raises ClassNotFound if not found.
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)

    wanted = _alias.lower()
    # Builtin lexers first.
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # Then lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if wanted in plugin.aliases:
            return plugin(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
    as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Lexer.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `lexername` from that namespace
        if lexername not in custom_namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (lexername, filename))
        lexer_class = custom_namespace[lexername]
        # And finally instantiate it with the options
        return lexer_class(**options)
    except OSError as err:
        # Chain the original error (``from err``) so the real cause is not
        # lost -- consistent with the rest of the codebase.
        raise ClassNotFound('cannot read %s: %s' % (filename, err)) from err
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom lexer: %s' % err) from err
+
+
def find_lexer_class_for_filename(_fn, code=None):
    """Return the lexer class best matching the filename *_fn*.

    If multiple lexers match the filename pattern, use ``analyse_text()``
    on *code* (when given) to figure out which one is more appropriate.

    Returns None if not found.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.values():
        for filename in filenames:
            if fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch(fn, filename):
                matches.append((cls, filename))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, filename = info
        # Explicit (non-wildcard) patterns get a bonus.  A conditional
        # expression replaces the fragile legacy ``cond and x or y`` idiom.
        bonus = 0.5 if '*' not in filename else 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if matches:
        matches.sort(key=get_rating)
        # The highest-rated match wins.
        return matches[-1][0]
+
+
def get_lexer_for_filename(_fn, code=None, **options):
    """Return a lexer instance matching the filename *_fn*.

    If multiple lexers match the filename pattern, ``analyse_text()`` is
    used on *code* to figure out which one is more appropriate.

    Raises ClassNotFound if not found.
    """
    lexer_class = find_lexer_class_for_filename(_fn, code)
    if not lexer_class:
        raise ClassNotFound('no lexer for filename %r found' % _fn)
    return lexer_class(**options)
+
+
def get_lexer_for_mimetype(_mime, **options):
    """Return a lexer instance for the mimetype *_mime*.

    Raises ClassNotFound if not found.
    """
    # Builtin lexers first.
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    # Then lexers registered via setuptools entrypoints.
    for plugin in find_plugin_lexers():
        if _mime in plugin.mimetypes:
            return plugin(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
+
+
def _iter_lexerclasses(plugins=True):
    """Yield every lexer class, builtins in sorted-key order, loading
    modules on demand."""
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    if plugins:
        yield from find_plugin_lexers()
+
+
def guess_lexer_for_filename(_fn, _text, **options):
    """
    Look up all lexers that handle the filename, either through their
    primary (``filenames``) or secondary (``alias_filenames``) patterns,
    then run a text analysis on ``_text`` for those lexers and return an
    instance of the best-scoring one.

    usage::

        >>> from pygments.lexers import guess_lexer_for_filename
        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
    """
    fn = basename(_fn)
    is_primary = {}
    candidates = set()
    for lexer in _iter_lexerclasses():
        for pattern in lexer.filenames:
            if fnmatch(fn, pattern):
                candidates.add(lexer)
                is_primary[lexer] = True
        # Secondary patterns may overwrite the flag set above.
        for pattern in lexer.alias_filenames:
            if fnmatch(fn, pattern):
                candidates.add(lexer)
                is_primary[lexer] = False
    if not candidates:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(candidates) == 1:
        return candidates.pop()(**options)
    scored = []
    for lexer in candidates:
        score = lexer.analyse_text(_text)
        if score == 1.0:
            # A perfect score short-circuits the search.
            return lexer(**options)
        scored.append((score, lexer))

    def rank(entry):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        score, lexer = entry
        return score, is_primary[lexer], lexer.priority, lexer.__name__

    scored.sort(key=rank)
    return scored[-1][1](**options)
+
+
def guess_lexer(_text, **options):
    """Guess a lexer by strong distinctions in the text (eg, shebang)."""

    if not isinstance(_text, str):
        # Decode byte input before analysis; an explicit encoding option
        # takes precedence over auto-detection.
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            _text = _text.decode(inencoding or 'utf8')
        else:
            _text, _ = guess_decode(_text)

    # A vim modeline, when present and resolvable, wins outright.
    filetype = get_filetype_from_buffer(_text)
    if filetype is not None:
        try:
            return get_lexer_by_name(filetype, **options)
        except ClassNotFound:
            pass

    best_score = 0.0
    best_class = None
    for lexer in _iter_lexerclasses():
        score = lexer.analyse_text(_text)
        if score == 1.0:
            return lexer(**options)
        if score > best_score:
            best_score, best_class = score, lexer
    if not best_score or best_class is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_class(**options)
+
+
class _automodule(types.ModuleType):
    """Module subclass that imports lexer classes lazily on first
    attribute access."""

    def __getattr__(self, name):
        info = LEXERS.get(name)
        if info is not None:
            _load_lexers(info[0])
            lexer_class = _lexer_cache[info[1]]
            # Cache on the module object so later lookups bypass __getattr__.
            setattr(self, name, lexer_class)
            return lexer_class
        if name in COMPAT:
            # Deprecated alias -- resolve to the current class.
            return getattr(self, COMPAT[name])
        raise AttributeError(name)
+
+
# Replace this module in sys.modules with an _automodule instance so that
# individual lexer classes are imported lazily on attribute access.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
# Drop helper names that should not leak into the public module namespace.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
diff --git a/pygments/lexers/_ada_builtins.py b/pygments/lexers/_ada_builtins.py
new file mode 100644
index 0000000..dce1a5b
--- /dev/null
+++ b/pygments/lexers/_ada_builtins.py
@@ -0,0 +1,103 @@
+"""
+ pygments.lexers._ada_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Ada builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
# Reserved words of the Ada language.  NOTE(review): the list is mostly
# alphabetical but 'interface' appears between 'exit' and 'for' -- harmless
# for membership checks, but worth confirming against the generator script.
KEYWORD_LIST = (
    'abort',
    'abs',
    'abstract',
    'accept',
    'access',
    'aliased',
    'all',
    'array',
    'at',
    'begin',
    'body',
    'case',
    'constant',
    'declare',
    'delay',
    'delta',
    'digits',
    'do',
    'else',
    'elsif',
    'end',
    'entry',
    'exception',
    'exit',
    'interface',
    'for',
    'goto',
    'if',
    'is',
    'limited',
    'loop',
    'new',
    'null',
    'of',
    'or',
    'others',
    'out',
    'overriding',
    'pragma',
    'protected',
    'raise',
    'range',
    'record',
    'renames',
    'requeue',
    'return',
    'reverse',
    'select',
    'separate',
    'some',
    'subtype',
    'synchronized',
    'task',
    'tagged',
    'terminate',
    'then',
    'type',
    'until',
    'when',
    'while',
    'xor'
)

# Names of predefined Ada types and standard-library identifiers that are
# highlighted as builtins.
BUILTIN_LIST = (
    'Address',
    'Byte',
    'Boolean',
    'Character',
    'Controlled',
    'Count',
    'Cursor',
    'Duration',
    'File_Mode',
    'File_Type',
    'Float',
    'Generator',
    'Integer',
    'Long_Float',
    'Long_Integer',
    'Long_Long_Float',
    'Long_Long_Integer',
    'Natural',
    'Positive',
    'Reference_Type',
    'Short_Float',
    'Short_Integer',
    'Short_Short_Float',
    'Short_Short_Integer',
    'String',
    'Wide_Character',
    'Wide_String'
)
diff --git a/pygments/lexers/_asy_builtins.py b/pygments/lexers/_asy_builtins.py
new file mode 100644
index 0000000..88c2038
--- /dev/null
+++ b/pygments/lexers/_asy_builtins.py
@@ -0,0 +1,1644 @@
+"""
+ pygments.lexers._asy_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file contains the asy-function names and asy-variable names of
+ Asymptote.
+
+ Do not edit the ASYFUNCNAME and ASYVARNAME sets by hand.
+ TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
+ for function and variable names.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+ASYFUNCNAME = {
+ 'AND',
+ 'Arc',
+ 'ArcArrow',
+ 'ArcArrows',
+ 'Arrow',
+ 'Arrows',
+ 'Automatic',
+ 'AvantGarde',
+ 'BBox',
+ 'BWRainbow',
+ 'BWRainbow2',
+ 'Bar',
+ 'Bars',
+ 'BeginArcArrow',
+ 'BeginArrow',
+ 'BeginBar',
+ 'BeginDotMargin',
+ 'BeginMargin',
+ 'BeginPenMargin',
+ 'Blank',
+ 'Bookman',
+ 'Bottom',
+ 'BottomTop',
+ 'Bounds',
+ 'Break',
+ 'Broken',
+ 'BrokenLog',
+ 'Ceil',
+ 'Circle',
+ 'CircleBarIntervalMarker',
+ 'Cos',
+ 'Courier',
+ 'CrossIntervalMarker',
+ 'DefaultFormat',
+ 'DefaultLogFormat',
+ 'Degrees',
+ 'Dir',
+ 'DotMargin',
+ 'DotMargins',
+ 'Dotted',
+ 'Draw',
+ 'Drawline',
+ 'Embed',
+ 'EndArcArrow',
+ 'EndArrow',
+ 'EndBar',
+ 'EndDotMargin',
+ 'EndMargin',
+ 'EndPenMargin',
+ 'Fill',
+ 'FillDraw',
+ 'Floor',
+ 'Format',
+ 'Full',
+ 'Gaussian',
+ 'Gaussrand',
+ 'Gaussrandpair',
+ 'Gradient',
+ 'Grayscale',
+ 'Helvetica',
+ 'Hermite',
+ 'HookHead',
+ 'InOutTicks',
+ 'InTicks',
+ 'J',
+ 'Label',
+ 'Landscape',
+ 'Left',
+ 'LeftRight',
+ 'LeftTicks',
+ 'Legend',
+ 'Linear',
+ 'Link',
+ 'Log',
+ 'LogFormat',
+ 'Margin',
+ 'Margins',
+ 'Mark',
+ 'MidArcArrow',
+ 'MidArrow',
+ 'NOT',
+ 'NewCenturySchoolBook',
+ 'NoBox',
+ 'NoMargin',
+ 'NoModifier',
+ 'NoTicks',
+ 'NoTicks3',
+ 'NoZero',
+ 'NoZeroFormat',
+ 'None',
+ 'OR',
+ 'OmitFormat',
+ 'OmitTick',
+ 'OutTicks',
+ 'Ox',
+ 'Oy',
+ 'Palatino',
+ 'PaletteTicks',
+ 'Pen',
+ 'PenMargin',
+ 'PenMargins',
+ 'Pentype',
+ 'Portrait',
+ 'RadialShade',
+ 'Rainbow',
+ 'Range',
+ 'Relative',
+ 'Right',
+ 'RightTicks',
+ 'Rotate',
+ 'Round',
+ 'SQR',
+ 'Scale',
+ 'ScaleX',
+ 'ScaleY',
+ 'ScaleZ',
+ 'Seascape',
+ 'Shift',
+ 'Sin',
+ 'Slant',
+ 'Spline',
+ 'StickIntervalMarker',
+ 'Straight',
+ 'Symbol',
+ 'Tan',
+ 'TeXify',
+ 'Ticks',
+ 'Ticks3',
+ 'TildeIntervalMarker',
+ 'TimesRoman',
+ 'Top',
+ 'TrueMargin',
+ 'UnFill',
+ 'UpsideDown',
+ 'Wheel',
+ 'X',
+ 'XEquals',
+ 'XOR',
+ 'XY',
+ 'XYEquals',
+ 'XYZero',
+ 'XYgrid',
+ 'XZEquals',
+ 'XZZero',
+ 'XZero',
+ 'XZgrid',
+ 'Y',
+ 'YEquals',
+ 'YXgrid',
+ 'YZ',
+ 'YZEquals',
+ 'YZZero',
+ 'YZero',
+ 'YZgrid',
+ 'Z',
+ 'ZX',
+ 'ZXgrid',
+ 'ZYgrid',
+ 'ZapfChancery',
+ 'ZapfDingbats',
+ '_cputime',
+ '_draw',
+ '_eval',
+ '_image',
+ '_labelpath',
+ '_projection',
+ '_strokepath',
+ '_texpath',
+ 'aCos',
+ 'aSin',
+ 'aTan',
+ 'abort',
+ 'abs',
+ 'accel',
+ 'acos',
+ 'acosh',
+ 'acot',
+ 'acsc',
+ 'add',
+ 'addArrow',
+ 'addMargins',
+ 'addSaveFunction',
+ 'addnode',
+ 'addnodes',
+ 'addpenarc',
+ 'addpenline',
+ 'addseg',
+ 'adjust',
+ 'alias',
+ 'align',
+ 'all',
+ 'altitude',
+ 'angabscissa',
+ 'angle',
+ 'angpoint',
+ 'animate',
+ 'annotate',
+ 'anticomplementary',
+ 'antipedal',
+ 'apply',
+ 'approximate',
+ 'arc',
+ 'arcarrowsize',
+ 'arccircle',
+ 'arcdir',
+ 'arcfromcenter',
+ 'arcfromfocus',
+ 'arclength',
+ 'arcnodesnumber',
+ 'arcpoint',
+ 'arcsubtended',
+ 'arcsubtendedcenter',
+ 'arctime',
+ 'arctopath',
+ 'array',
+ 'arrow',
+ 'arrow2',
+ 'arrowbase',
+ 'arrowbasepoints',
+ 'arrowsize',
+ 'asec',
+ 'asin',
+ 'asinh',
+ 'ask',
+ 'assert',
+ 'asy',
+ 'asycode',
+ 'asydir',
+ 'asyfigure',
+ 'asyfilecode',
+ 'asyinclude',
+ 'asywrite',
+ 'atan',
+ 'atan2',
+ 'atanh',
+ 'atbreakpoint',
+ 'atexit',
+ 'atime',
+ 'attach',
+ 'attract',
+ 'atupdate',
+ 'autoformat',
+ 'autoscale',
+ 'autoscale3',
+ 'axes',
+ 'axes3',
+ 'axialshade',
+ 'axis',
+ 'axiscoverage',
+ 'azimuth',
+ 'babel',
+ 'background',
+ 'bangles',
+ 'bar',
+ 'barmarksize',
+ 'barsize',
+ 'basealign',
+ 'baseline',
+ 'bbox',
+ 'beep',
+ 'begin',
+ 'beginclip',
+ 'begingroup',
+ 'beginpoint',
+ 'between',
+ 'bevel',
+ 'bezier',
+ 'bezierP',
+ 'bezierPP',
+ 'bezierPPP',
+ 'bezulate',
+ 'bibliography',
+ 'bibliographystyle',
+ 'binarytree',
+ 'binarytreeNode',
+ 'binomial',
+ 'binput',
+ 'bins',
+ 'bisector',
+ 'bisectorpoint',
+ 'blend',
+ 'boutput',
+ 'box',
+ 'bqe',
+ 'breakpoint',
+ 'breakpoints',
+ 'brick',
+ 'buildRestoreDefaults',
+ 'buildRestoreThunk',
+ 'buildcycle',
+ 'bulletcolor',
+ 'canonical',
+ 'canonicalcartesiansystem',
+ 'cartesiansystem',
+ 'case1',
+ 'case2',
+ 'case3',
+ 'cbrt',
+ 'cd',
+ 'ceil',
+ 'center',
+ 'centerToFocus',
+ 'centroid',
+ 'cevian',
+ 'change2',
+ 'changecoordsys',
+ 'checkSegment',
+ 'checkconditionlength',
+ 'checker',
+ 'checklengths',
+ 'checkposition',
+ 'checktriangle',
+ 'choose',
+ 'circle',
+ 'circlebarframe',
+ 'circlemarkradius',
+ 'circlenodesnumber',
+ 'circumcenter',
+ 'circumcircle',
+ 'clamped',
+ 'clear',
+ 'clip',
+ 'clipdraw',
+ 'close',
+ 'cmyk',
+ 'code',
+ 'colatitude',
+ 'collect',
+ 'collinear',
+ 'color',
+ 'colorless',
+ 'colors',
+ 'colorspace',
+ 'comma',
+ 'compassmark',
+ 'complement',
+ 'complementary',
+ 'concat',
+ 'concurrent',
+ 'cone',
+ 'conic',
+ 'conicnodesnumber',
+ 'conictype',
+ 'conj',
+ 'connect',
+ 'containmentTree',
+ 'contains',
+ 'contour',
+ 'contour3',
+ 'controlSpecifier',
+ 'convert',
+ 'coordinates',
+ 'coordsys',
+ 'copy',
+ 'cos',
+ 'cosh',
+ 'cot',
+ 'countIntersections',
+ 'cputime',
+ 'crop',
+ 'cropcode',
+ 'cross',
+ 'crossframe',
+ 'crosshatch',
+ 'crossmarksize',
+ 'csc',
+ 'cubicroots',
+ 'curabscissa',
+ 'curlSpecifier',
+ 'curpoint',
+ 'currentarrow',
+ 'currentexitfunction',
+ 'currentmomarrow',
+ 'currentpolarconicroutine',
+ 'curve',
+ 'cut',
+ 'cutafter',
+ 'cutbefore',
+ 'cyclic',
+ 'cylinder',
+ 'debugger',
+ 'deconstruct',
+ 'defaultdir',
+ 'defaultformat',
+ 'defaultpen',
+ 'defined',
+ 'degenerate',
+ 'degrees',
+ 'delete',
+ 'deletepreamble',
+ 'determinant',
+ 'diagonal',
+ 'diamond',
+ 'diffdiv',
+ 'dir',
+ 'dirSpecifier',
+ 'dirtime',
+ 'display',
+ 'distance',
+ 'divisors',
+ 'do_overpaint',
+ 'dot',
+ 'dotframe',
+ 'dotsize',
+ 'downcase',
+ 'draw',
+ 'drawAll',
+ 'drawDoubleLine',
+ 'drawFermion',
+ 'drawGhost',
+ 'drawGluon',
+ 'drawMomArrow',
+ 'drawPhoton',
+ 'drawScalar',
+ 'drawVertex',
+ 'drawVertexBox',
+ 'drawVertexBoxO',
+ 'drawVertexBoxX',
+ 'drawVertexO',
+ 'drawVertexOX',
+ 'drawVertexTriangle',
+ 'drawVertexTriangleO',
+ 'drawVertexX',
+ 'drawarrow',
+ 'drawarrow2',
+ 'drawline',
+ 'drawtick',
+ 'duplicate',
+ 'elle',
+ 'ellipse',
+ 'ellipsenodesnumber',
+ 'embed',
+ 'embed3',
+ 'empty',
+ 'enclose',
+ 'end',
+ 'endScript',
+ 'endclip',
+ 'endgroup',
+ 'endl',
+ 'endpoint',
+ 'endpoints',
+ 'eof',
+ 'eol',
+ 'equation',
+ 'equations',
+ 'erase',
+ 'erasestep',
+ 'erf',
+ 'erfc',
+ 'error',
+ 'errorbar',
+ 'errorbars',
+ 'eval',
+ 'excenter',
+ 'excircle',
+ 'exit',
+ 'exitXasyMode',
+ 'exitfunction',
+ 'exp',
+ 'expfactors',
+ 'expi',
+ 'expm1',
+ 'exradius',
+ 'extend',
+ 'extension',
+ 'extouch',
+ 'fabs',
+ 'factorial',
+ 'fermat',
+ 'fft',
+ 'fhorner',
+ 'figure',
+ 'file',
+ 'filecode',
+ 'fill',
+ 'filldraw',
+ 'filloutside',
+ 'fillrule',
+ 'filltype',
+ 'find',
+ 'finite',
+ 'finiteDifferenceJacobian',
+ 'firstcut',
+ 'firstframe',
+ 'fit',
+ 'fit2',
+ 'fixedscaling',
+ 'floor',
+ 'flush',
+ 'fmdefaults',
+ 'fmod',
+ 'focusToCenter',
+ 'font',
+ 'fontcommand',
+ 'fontsize',
+ 'foot',
+ 'format',
+ 'frac',
+ 'frequency',
+ 'fromCenter',
+ 'fromFocus',
+ 'fspline',
+ 'functionshade',
+ 'gamma',
+ 'generate_random_backtrace',
+ 'generateticks',
+ 'gergonne',
+ 'getc',
+ 'getint',
+ 'getpair',
+ 'getreal',
+ 'getstring',
+ 'gettriple',
+ 'gluon',
+ 'gouraudshade',
+ 'graph',
+ 'graphic',
+ 'gray',
+ 'grestore',
+ 'grid',
+ 'grid3',
+ 'gsave',
+ 'halfbox',
+ 'hatch',
+ 'hdiffdiv',
+ 'hermite',
+ 'hex',
+ 'histogram',
+ 'history',
+ 'hline',
+ 'hprojection',
+ 'hsv',
+ 'hyperbola',
+ 'hyperbolanodesnumber',
+ 'hyperlink',
+ 'hypot',
+ 'identity',
+ 'image',
+ 'incenter',
+ 'incentral',
+ 'incircle',
+ 'increasing',
+ 'incrementposition',
+ 'indexedTransform',
+ 'indexedfigure',
+ 'initXasyMode',
+ 'initdefaults',
+ 'input',
+ 'inradius',
+ 'insert',
+ 'inside',
+ 'integrate',
+ 'interactive',
+ 'interior',
+ 'interp',
+ 'interpolate',
+ 'intersect',
+ 'intersection',
+ 'intersectionpoint',
+ 'intersectionpoints',
+ 'intersections',
+ 'intouch',
+ 'inverse',
+ 'inversion',
+ 'invisible',
+ 'is3D',
+ 'isDuplicate',
+ 'isogonal',
+ 'isogonalconjugate',
+ 'isotomic',
+ 'isotomicconjugate',
+ 'isparabola',
+ 'italic',
+ 'item',
+ 'key',
+ 'kurtosis',
+ 'kurtosisexcess',
+ 'label',
+ 'labelaxis',
+ 'labelmargin',
+ 'labelpath',
+ 'labels',
+ 'labeltick',
+ 'labelx',
+ 'labelx3',
+ 'labely',
+ 'labely3',
+ 'labelz',
+ 'labelz3',
+ 'lastcut',
+ 'latex',
+ 'latitude',
+ 'latticeshade',
+ 'layer',
+ 'layout',
+ 'ldexp',
+ 'leastsquares',
+ 'legend',
+ 'legenditem',
+ 'length',
+ 'lift',
+ 'light',
+ 'limits',
+ 'line',
+ 'linear',
+ 'linecap',
+ 'lineinversion',
+ 'linejoin',
+ 'linemargin',
+ 'lineskip',
+ 'linetype',
+ 'linewidth',
+ 'link',
+ 'list',
+ 'lm_enorm',
+ 'lm_evaluate_default',
+ 'lm_lmdif',
+ 'lm_lmpar',
+ 'lm_minimize',
+ 'lm_print_default',
+ 'lm_print_quiet',
+ 'lm_qrfac',
+ 'lm_qrsolv',
+ 'locale',
+ 'locate',
+ 'locatefile',
+ 'location',
+ 'log',
+ 'log10',
+ 'log1p',
+ 'logaxiscoverage',
+ 'longitude',
+ 'lookup',
+ 'magnetize',
+ 'makeNode',
+ 'makedraw',
+ 'makepen',
+ 'map',
+ 'margin',
+ 'markangle',
+ 'markangleradius',
+ 'markanglespace',
+ 'markarc',
+ 'marker',
+ 'markinterval',
+ 'marknodes',
+ 'markrightangle',
+ 'markuniform',
+ 'mass',
+ 'masscenter',
+ 'massformat',
+ 'math',
+ 'max',
+ 'max3',
+ 'maxbezier',
+ 'maxbound',
+ 'maxcoords',
+ 'maxlength',
+ 'maxratio',
+ 'maxtimes',
+ 'mean',
+ 'medial',
+ 'median',
+ 'midpoint',
+ 'min',
+ 'min3',
+ 'minbezier',
+ 'minbound',
+ 'minipage',
+ 'minratio',
+ 'mintimes',
+ 'miterlimit',
+ 'momArrowPath',
+ 'momarrowsize',
+ 'monotonic',
+ 'multifigure',
+ 'nativeformat',
+ 'natural',
+ 'needshipout',
+ 'newl',
+ 'newpage',
+ 'newslide',
+ 'newton',
+ 'newtree',
+ 'nextframe',
+ 'nextnormal',
+ 'nextpage',
+ 'nib',
+ 'nodabscissa',
+ 'none',
+ 'norm',
+ 'normalvideo',
+ 'notaknot',
+ 'nowarn',
+ 'numberpage',
+ 'nurb',
+ 'object',
+ 'offset',
+ 'onpath',
+ 'opacity',
+ 'opposite',
+ 'orientation',
+ 'orig_circlenodesnumber',
+ 'orig_circlenodesnumber1',
+ 'orig_draw',
+ 'orig_ellipsenodesnumber',
+ 'orig_ellipsenodesnumber1',
+ 'orig_hyperbolanodesnumber',
+ 'orig_parabolanodesnumber',
+ 'origin',
+ 'orthic',
+ 'orthocentercenter',
+ 'outformat',
+ 'outline',
+ 'outprefix',
+ 'output',
+ 'overloadedMessage',
+ 'overwrite',
+ 'pack',
+ 'pad',
+ 'pairs',
+ 'palette',
+ 'parabola',
+ 'parabolanodesnumber',
+ 'parallel',
+ 'partialsum',
+ 'path',
+ 'path3',
+ 'pattern',
+ 'pause',
+ 'pdf',
+ 'pedal',
+ 'periodic',
+ 'perp',
+ 'perpendicular',
+ 'perpendicularmark',
+ 'phantom',
+ 'phi1',
+ 'phi2',
+ 'phi3',
+ 'photon',
+ 'piecewisestraight',
+ 'point',
+ 'polar',
+ 'polarconicroutine',
+ 'polargraph',
+ 'polygon',
+ 'postcontrol',
+ 'postscript',
+ 'pow10',
+ 'ppoint',
+ 'prc',
+ 'prc0',
+ 'precision',
+ 'precontrol',
+ 'prepend',
+ 'print_random_addresses',
+ 'project',
+ 'projection',
+ 'purge',
+ 'pwhermite',
+ 'quadrant',
+ 'quadraticroots',
+ 'quantize',
+ 'quarticroots',
+ 'quotient',
+ 'radialshade',
+ 'radians',
+ 'radicalcenter',
+ 'radicalline',
+ 'radius',
+ 'rand',
+ 'randompath',
+ 'rd',
+ 'readline',
+ 'realmult',
+ 'realquarticroots',
+ 'rectangle',
+ 'rectangular',
+ 'rectify',
+ 'reflect',
+ 'relabscissa',
+ 'relative',
+ 'relativedistance',
+ 'reldir',
+ 'relpoint',
+ 'reltime',
+ 'remainder',
+ 'remark',
+ 'removeDuplicates',
+ 'rename',
+ 'replace',
+ 'report',
+ 'resetdefaultpen',
+ 'restore',
+ 'restoredefaults',
+ 'reverse',
+ 'reversevideo',
+ 'rf',
+ 'rfind',
+ 'rgb',
+ 'rgba',
+ 'rgbint',
+ 'rms',
+ 'rotate',
+ 'rotateO',
+ 'rotation',
+ 'round',
+ 'roundbox',
+ 'roundedpath',
+ 'roundrectangle',
+ 'samecoordsys',
+ 'sameside',
+ 'sample',
+ 'save',
+ 'savedefaults',
+ 'saveline',
+ 'scale',
+ 'scale3',
+ 'scaleO',
+ 'scaleT',
+ 'scaleless',
+ 'scientific',
+ 'search',
+ 'searchtree',
+ 'sec',
+ 'secondaryX',
+ 'secondaryY',
+ 'seconds',
+ 'section',
+ 'sector',
+ 'seek',
+ 'seekeof',
+ 'segment',
+ 'sequence',
+ 'setpens',
+ 'sgn',
+ 'sgnd',
+ 'sharpangle',
+ 'sharpdegrees',
+ 'shift',
+ 'shiftless',
+ 'shipout',
+ 'shipout3',
+ 'show',
+ 'side',
+ 'simeq',
+ 'simpson',
+ 'sin',
+ 'single',
+ 'sinh',
+ 'size',
+ 'size3',
+ 'skewness',
+ 'skip',
+ 'slant',
+ 'sleep',
+ 'slope',
+ 'slopefield',
+ 'solve',
+ 'solveBVP',
+ 'sort',
+ 'sourceline',
+ 'sphere',
+ 'split',
+ 'sqrt',
+ 'square',
+ 'srand',
+ 'standardizecoordsys',
+ 'startScript',
+ 'startTrembling',
+ 'stdev',
+ 'step',
+ 'stickframe',
+ 'stickmarksize',
+ 'stickmarkspace',
+ 'stop',
+ 'straight',
+ 'straightness',
+ 'string',
+ 'stripdirectory',
+ 'stripextension',
+ 'stripfile',
+ 'strokepath',
+ 'subdivide',
+ 'subitem',
+ 'subpath',
+ 'substr',
+ 'sum',
+ 'surface',
+ 'symmedial',
+ 'symmedian',
+ 'system',
+ 'tab',
+ 'tableau',
+ 'tan',
+ 'tangent',
+ 'tangential',
+ 'tangents',
+ 'tanh',
+ 'tell',
+ 'tensionSpecifier',
+ 'tensorshade',
+ 'tex',
+ 'texcolor',
+ 'texify',
+ 'texpath',
+ 'texpreamble',
+ 'texreset',
+ 'texshipout',
+ 'texsize',
+ 'textpath',
+ 'thick',
+ 'thin',
+ 'tick',
+ 'tickMax',
+ 'tickMax3',
+ 'tickMin',
+ 'tickMin3',
+ 'ticklabelshift',
+ 'ticklocate',
+ 'tildeframe',
+ 'tildemarksize',
+ 'tile',
+ 'tiling',
+ 'time',
+ 'times',
+ 'title',
+ 'titlepage',
+ 'topbox',
+ 'transform',
+ 'transformation',
+ 'transpose',
+ 'tremble',
+ 'trembleFuzz',
+ 'tremble_circlenodesnumber',
+ 'tremble_circlenodesnumber1',
+ 'tremble_draw',
+ 'tremble_ellipsenodesnumber',
+ 'tremble_ellipsenodesnumber1',
+ 'tremble_hyperbolanodesnumber',
+ 'tremble_marknodes',
+ 'tremble_markuniform',
+ 'tremble_parabolanodesnumber',
+ 'triangle',
+ 'triangleAbc',
+ 'triangleabc',
+ 'triangulate',
+ 'tricoef',
+ 'tridiagonal',
+ 'trilinear',
+ 'trim',
+ 'trueMagnetize',
+ 'truepoint',
+ 'tube',
+ 'uncycle',
+ 'unfill',
+ 'uniform',
+ 'unit',
+ 'unitrand',
+ 'unitsize',
+ 'unityroot',
+ 'unstraighten',
+ 'upcase',
+ 'updatefunction',
+ 'uperiodic',
+ 'upscale',
+ 'uptodate',
+ 'usepackage',
+ 'usersetting',
+ 'usetypescript',
+ 'usleep',
+ 'value',
+ 'variance',
+ 'variancebiased',
+ 'vbox',
+ 'vector',
+ 'vectorfield',
+ 'verbatim',
+ 'view',
+ 'vline',
+ 'vperiodic',
+ 'vprojection',
+ 'warn',
+ 'warning',
+ 'windingnumber',
+ 'write',
+ 'xaxis',
+ 'xaxis3',
+ 'xaxis3At',
+ 'xaxisAt',
+ 'xequals',
+ 'xinput',
+ 'xlimits',
+ 'xoutput',
+ 'xpart',
+ 'xscale',
+ 'xscaleO',
+ 'xtick',
+ 'xtick3',
+ 'xtrans',
+ 'yaxis',
+ 'yaxis3',
+ 'yaxis3At',
+ 'yaxisAt',
+ 'yequals',
+ 'ylimits',
+ 'ypart',
+ 'yscale',
+ 'yscaleO',
+ 'ytick',
+ 'ytick3',
+ 'ytrans',
+ 'zaxis3',
+ 'zaxis3At',
+ 'zero',
+ 'zero3',
+ 'zlimits',
+ 'zpart',
+ 'ztick',
+ 'ztick3',
+ 'ztrans'
+}
+
+ASYVARNAME = {
+ 'AliceBlue',
+ 'Align',
+ 'Allow',
+ 'AntiqueWhite',
+ 'Apricot',
+ 'Aqua',
+ 'Aquamarine',
+ 'Aspect',
+ 'Azure',
+ 'BeginPoint',
+ 'Beige',
+ 'Bisque',
+ 'Bittersweet',
+ 'Black',
+ 'BlanchedAlmond',
+ 'Blue',
+ 'BlueGreen',
+ 'BlueViolet',
+ 'Both',
+ 'Break',
+ 'BrickRed',
+ 'Brown',
+ 'BurlyWood',
+ 'BurntOrange',
+ 'CCW',
+ 'CW',
+ 'CadetBlue',
+ 'CarnationPink',
+ 'Center',
+ 'Centered',
+ 'Cerulean',
+ 'Chartreuse',
+ 'Chocolate',
+ 'Coeff',
+ 'Coral',
+ 'CornflowerBlue',
+ 'Cornsilk',
+ 'Crimson',
+ 'Crop',
+ 'Cyan',
+ 'Dandelion',
+ 'DarkBlue',
+ 'DarkCyan',
+ 'DarkGoldenrod',
+ 'DarkGray',
+ 'DarkGreen',
+ 'DarkKhaki',
+ 'DarkMagenta',
+ 'DarkOliveGreen',
+ 'DarkOrange',
+ 'DarkOrchid',
+ 'DarkRed',
+ 'DarkSalmon',
+ 'DarkSeaGreen',
+ 'DarkSlateBlue',
+ 'DarkSlateGray',
+ 'DarkTurquoise',
+ 'DarkViolet',
+ 'DeepPink',
+ 'DeepSkyBlue',
+ 'DefaultHead',
+ 'DimGray',
+ 'DodgerBlue',
+ 'Dotted',
+ 'Draw',
+ 'E',
+ 'ENE',
+ 'EPS',
+ 'ESE',
+ 'E_Euler',
+ 'E_PC',
+ 'E_RK2',
+ 'E_RK3BS',
+ 'Emerald',
+ 'EndPoint',
+ 'Euler',
+ 'Fill',
+ 'FillDraw',
+ 'FireBrick',
+ 'FloralWhite',
+ 'ForestGreen',
+ 'Fuchsia',
+ 'Gainsboro',
+ 'GhostWhite',
+ 'Gold',
+ 'Goldenrod',
+ 'Gray',
+ 'Green',
+ 'GreenYellow',
+ 'Honeydew',
+ 'HookHead',
+ 'Horizontal',
+ 'HotPink',
+ 'I',
+ 'IgnoreAspect',
+ 'IndianRed',
+ 'Indigo',
+ 'Ivory',
+ 'JOIN_IN',
+ 'JOIN_OUT',
+ 'JungleGreen',
+ 'Khaki',
+ 'LM_DWARF',
+ 'LM_MACHEP',
+ 'LM_SQRT_DWARF',
+ 'LM_SQRT_GIANT',
+ 'LM_USERTOL',
+ 'Label',
+ 'Lavender',
+ 'LavenderBlush',
+ 'LawnGreen',
+ 'LeftJustified',
+ 'LeftSide',
+ 'LemonChiffon',
+ 'LightBlue',
+ 'LightCoral',
+ 'LightCyan',
+ 'LightGoldenrodYellow',
+ 'LightGreen',
+ 'LightGrey',
+ 'LightPink',
+ 'LightSalmon',
+ 'LightSeaGreen',
+ 'LightSkyBlue',
+ 'LightSlateGray',
+ 'LightSteelBlue',
+ 'LightYellow',
+ 'Lime',
+ 'LimeGreen',
+ 'Linear',
+ 'Linen',
+ 'Log',
+ 'Logarithmic',
+ 'Magenta',
+ 'Mahogany',
+ 'Mark',
+ 'MarkFill',
+ 'Maroon',
+ 'Max',
+ 'MediumAquamarine',
+ 'MediumBlue',
+ 'MediumOrchid',
+ 'MediumPurple',
+ 'MediumSeaGreen',
+ 'MediumSlateBlue',
+ 'MediumSpringGreen',
+ 'MediumTurquoise',
+ 'MediumVioletRed',
+ 'Melon',
+ 'MidPoint',
+ 'MidnightBlue',
+ 'Min',
+ 'MintCream',
+ 'MistyRose',
+ 'Moccasin',
+ 'Move',
+ 'MoveQuiet',
+ 'Mulberry',
+ 'N',
+ 'NE',
+ 'NNE',
+ 'NNW',
+ 'NW',
+ 'NavajoWhite',
+ 'Navy',
+ 'NavyBlue',
+ 'NoAlign',
+ 'NoCrop',
+ 'NoFill',
+ 'NoSide',
+ 'OldLace',
+ 'Olive',
+ 'OliveDrab',
+ 'OliveGreen',
+ 'Orange',
+ 'OrangeRed',
+ 'Orchid',
+ 'Ox',
+ 'Oy',
+ 'PC',
+ 'PaleGoldenrod',
+ 'PaleGreen',
+ 'PaleTurquoise',
+ 'PaleVioletRed',
+ 'PapayaWhip',
+ 'Peach',
+ 'PeachPuff',
+ 'Periwinkle',
+ 'Peru',
+ 'PineGreen',
+ 'Pink',
+ 'Plum',
+ 'PowderBlue',
+ 'ProcessBlue',
+ 'Purple',
+ 'RK2',
+ 'RK3',
+ 'RK3BS',
+ 'RK4',
+ 'RK5',
+ 'RK5DP',
+ 'RK5F',
+ 'RawSienna',
+ 'Red',
+ 'RedOrange',
+ 'RedViolet',
+ 'Rhodamine',
+ 'RightJustified',
+ 'RightSide',
+ 'RosyBrown',
+ 'RoyalBlue',
+ 'RoyalPurple',
+ 'RubineRed',
+ 'S',
+ 'SE',
+ 'SSE',
+ 'SSW',
+ 'SW',
+ 'SaddleBrown',
+ 'Salmon',
+ 'SandyBrown',
+ 'SeaGreen',
+ 'Seashell',
+ 'Sepia',
+ 'Sienna',
+ 'Silver',
+ 'SimpleHead',
+ 'SkyBlue',
+ 'SlateBlue',
+ 'SlateGray',
+ 'Snow',
+ 'SpringGreen',
+ 'SteelBlue',
+ 'Suppress',
+ 'SuppressQuiet',
+ 'Tan',
+ 'TeXHead',
+ 'Teal',
+ 'TealBlue',
+ 'Thistle',
+ 'Ticksize',
+ 'Tomato',
+ 'Turquoise',
+ 'UnFill',
+ 'VERSION',
+ 'Value',
+ 'Vertical',
+ 'Violet',
+ 'VioletRed',
+ 'W',
+ 'WNW',
+ 'WSW',
+ 'Wheat',
+ 'White',
+ 'WhiteSmoke',
+ 'WildStrawberry',
+ 'XYAlign',
+ 'YAlign',
+ 'Yellow',
+ 'YellowGreen',
+ 'YellowOrange',
+ 'addpenarc',
+ 'addpenline',
+ 'align',
+ 'allowstepping',
+ 'angularsystem',
+ 'animationdelay',
+ 'appendsuffix',
+ 'arcarrowangle',
+ 'arcarrowfactor',
+ 'arrow2sizelimit',
+ 'arrowangle',
+ 'arrowbarb',
+ 'arrowdir',
+ 'arrowfactor',
+ 'arrowhookfactor',
+ 'arrowlength',
+ 'arrowsizelimit',
+ 'arrowtexfactor',
+ 'authorpen',
+ 'axis',
+ 'axiscoverage',
+ 'axislabelfactor',
+ 'background',
+ 'backgroundcolor',
+ 'backgroundpen',
+ 'barfactor',
+ 'barmarksizefactor',
+ 'basealign',
+ 'baselinetemplate',
+ 'beveljoin',
+ 'bigvertexpen',
+ 'bigvertexsize',
+ 'black',
+ 'blue',
+ 'bm',
+ 'bottom',
+ 'bp',
+ 'brown',
+ 'bullet',
+ 'byfoci',
+ 'byvertices',
+ 'camerafactor',
+ 'chartreuse',
+ 'circlemarkradiusfactor',
+ 'circlenodesnumberfactor',
+ 'circleprecision',
+ 'circlescale',
+ 'cm',
+ 'codefile',
+ 'codepen',
+ 'codeskip',
+ 'colorPen',
+ 'coloredNodes',
+ 'coloredSegments',
+ 'conditionlength',
+ 'conicnodesfactor',
+ 'count',
+ 'cputimeformat',
+ 'crossmarksizefactor',
+ 'currentcoordsys',
+ 'currentlight',
+ 'currentpatterns',
+ 'currentpen',
+ 'currentpicture',
+ 'currentposition',
+ 'currentprojection',
+ 'curvilinearsystem',
+ 'cuttings',
+ 'cyan',
+ 'darkblue',
+ 'darkbrown',
+ 'darkcyan',
+ 'darkgray',
+ 'darkgreen',
+ 'darkgrey',
+ 'darkmagenta',
+ 'darkolive',
+ 'darkred',
+ 'dashdotted',
+ 'dashed',
+ 'datepen',
+ 'dateskip',
+ 'debuggerlines',
+ 'debugging',
+ 'deepblue',
+ 'deepcyan',
+ 'deepgray',
+ 'deepgreen',
+ 'deepgrey',
+ 'deepmagenta',
+ 'deepred',
+ 'default',
+ 'defaultControl',
+ 'defaultS',
+ 'defaultbackpen',
+ 'defaultcoordsys',
+ 'defaultfilename',
+ 'defaultformat',
+ 'defaultmassformat',
+ 'defaultpen',
+ 'diagnostics',
+ 'differentlengths',
+ 'dot',
+ 'dotfactor',
+ 'dotframe',
+ 'dotted',
+ 'doublelinepen',
+ 'doublelinespacing',
+ 'down',
+ 'duplicateFuzz',
+ 'ellipsenodesnumberfactor',
+ 'eps',
+ 'epsgeo',
+ 'epsilon',
+ 'evenodd',
+ 'extendcap',
+ 'fermionpen',
+ 'figureborder',
+ 'figuremattpen',
+ 'firstnode',
+ 'firststep',
+ 'foregroundcolor',
+ 'fuchsia',
+ 'fuzz',
+ 'gapfactor',
+ 'ghostpen',
+ 'gluonamplitude',
+ 'gluonpen',
+ 'gluonratio',
+ 'gray',
+ 'green',
+ 'grey',
+ 'hatchepsilon',
+ 'havepagenumber',
+ 'heavyblue',
+ 'heavycyan',
+ 'heavygray',
+ 'heavygreen',
+ 'heavygrey',
+ 'heavymagenta',
+ 'heavyred',
+ 'hline',
+ 'hwratio',
+ 'hyperbolanodesnumberfactor',
+ 'identity4',
+ 'ignore',
+ 'inXasyMode',
+ 'inch',
+ 'inches',
+ 'includegraphicscommand',
+ 'inf',
+ 'infinity',
+ 'institutionpen',
+ 'intMax',
+ 'intMin',
+ 'invert',
+ 'invisible',
+ 'itempen',
+ 'itemskip',
+ 'itemstep',
+ 'labelmargin',
+ 'landscape',
+ 'lastnode',
+ 'left',
+ 'legendhskip',
+ 'legendlinelength',
+ 'legendmargin',
+ 'legendmarkersize',
+ 'legendmaxrelativewidth',
+ 'legendvskip',
+ 'lightblue',
+ 'lightcyan',
+ 'lightgray',
+ 'lightgreen',
+ 'lightgrey',
+ 'lightmagenta',
+ 'lightolive',
+ 'lightred',
+ 'lightyellow',
+ 'linemargin',
+ 'lm_infmsg',
+ 'lm_shortmsg',
+ 'longdashdotted',
+ 'longdashed',
+ 'magenta',
+ 'magneticPoints',
+ 'magneticRadius',
+ 'mantissaBits',
+ 'markangleradius',
+ 'markangleradiusfactor',
+ 'markanglespace',
+ 'markanglespacefactor',
+ 'mediumblue',
+ 'mediumcyan',
+ 'mediumgray',
+ 'mediumgreen',
+ 'mediumgrey',
+ 'mediummagenta',
+ 'mediumred',
+ 'mediumyellow',
+ 'middle',
+ 'minDistDefault',
+ 'minblockheight',
+ 'minblockwidth',
+ 'mincirclediameter',
+ 'minipagemargin',
+ 'minipagewidth',
+ 'minvertexangle',
+ 'miterjoin',
+ 'mm',
+ 'momarrowfactor',
+ 'momarrowlength',
+ 'momarrowmargin',
+ 'momarrowoffset',
+ 'momarrowpen',
+ 'monoPen',
+ 'morepoints',
+ 'nCircle',
+ 'newbulletcolor',
+ 'ngraph',
+ 'nil',
+ 'nmesh',
+ 'nobasealign',
+ 'nodeMarginDefault',
+ 'nodesystem',
+ 'nomarker',
+ 'nopoint',
+ 'noprimary',
+ 'nullpath',
+ 'nullpen',
+ 'numarray',
+ 'ocgindex',
+ 'oldbulletcolor',
+ 'olive',
+ 'orange',
+ 'origin',
+ 'overpaint',
+ 'page',
+ 'pageheight',
+ 'pagemargin',
+ 'pagenumberalign',
+ 'pagenumberpen',
+ 'pagenumberposition',
+ 'pagewidth',
+ 'paleblue',
+ 'palecyan',
+ 'palegray',
+ 'palegreen',
+ 'palegrey',
+ 'palemagenta',
+ 'palered',
+ 'paleyellow',
+ 'parabolanodesnumberfactor',
+ 'perpfactor',
+ 'phi',
+ 'photonamplitude',
+ 'photonpen',
+ 'photonratio',
+ 'pi',
+ 'pink',
+ 'plain',
+ 'plus',
+ 'preamblenodes',
+ 'pt',
+ 'purple',
+ 'r3',
+ 'r4a',
+ 'r4b',
+ 'randMax',
+ 'realDigits',
+ 'realEpsilon',
+ 'realMax',
+ 'realMin',
+ 'red',
+ 'relativesystem',
+ 'reverse',
+ 'right',
+ 'roundcap',
+ 'roundjoin',
+ 'royalblue',
+ 'salmon',
+ 'saveFunctions',
+ 'scalarpen',
+ 'sequencereal',
+ 'settings',
+ 'shipped',
+ 'signedtrailingzero',
+ 'solid',
+ 'springgreen',
+ 'sqrtEpsilon',
+ 'squarecap',
+ 'squarepen',
+ 'startposition',
+ 'stdin',
+ 'stdout',
+ 'stepfactor',
+ 'stepfraction',
+ 'steppagenumberpen',
+ 'stepping',
+ 'stickframe',
+ 'stickmarksizefactor',
+ 'stickmarkspacefactor',
+ 'textpen',
+ 'ticksize',
+ 'tildeframe',
+ 'tildemarksizefactor',
+ 'tinv',
+ 'titlealign',
+ 'titlepagepen',
+ 'titlepageposition',
+ 'titlepen',
+ 'titleskip',
+ 'top',
+ 'trailingzero',
+ 'treeLevelStep',
+ 'treeMinNodeWidth',
+ 'treeNodeStep',
+ 'trembleAngle',
+ 'trembleFrequency',
+ 'trembleRandom',
+ 'tremblingMode',
+ 'undefined',
+ 'unitcircle',
+ 'unitsquare',
+ 'up',
+ 'urlpen',
+ 'urlskip',
+ 'version',
+ 'vertexpen',
+ 'vertexsize',
+ 'viewportmargin',
+ 'viewportsize',
+ 'vline',
+ 'white',
+ 'wye',
+ 'xformStack',
+ 'yellow',
+ 'ylabelwidth',
+ 'zerotickfuzz',
+ 'zerowinding'
+}
diff --git a/pygments/lexers/_cl_builtins.py b/pygments/lexers/_cl_builtins.py
new file mode 100644
index 0000000..342e62f
--- /dev/null
+++ b/pygments/lexers/_cl_builtins.py
@@ -0,0 +1,231 @@
+"""
+ pygments.lexers._cl_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ ANSI Common Lisp builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+BUILTIN_FUNCTIONS = { # 638 functions
+ '<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
+ 'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
+ 'adjustable-array-p', 'adjust-array', 'allocate-instance',
+ 'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
+ 'apropos-list', 'aref', 'arithmetic-error-operands',
+ 'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
+ 'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
+ 'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
+ 'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
+ 'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
+ 'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
+ 'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
+ 'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
+ 'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
+ 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
+ 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
+ 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
+ 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
+ 'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
+ 'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
+ 'characterp', 'char-code', 'char-downcase', 'char-equal',
+ 'char-greaterp', 'char-int', 'char-lessp', 'char-name',
+ 'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
+ 'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
+ 'close', 'clrhash', 'code-char', 'coerce', 'compile',
+ 'compiled-function-p', 'compile-file', 'compile-file-pathname',
+ 'compiler-macro-function', 'complement', 'complex', 'complexp',
+ 'compute-applicable-methods', 'compute-restarts', 'concatenate',
+ 'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
+ 'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
+ 'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
+ 'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
+ 'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
+ 'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
+ 'delete-package', 'denominator', 'deposit-field', 'describe',
+ 'describe-object', 'digit-char', 'digit-char-p', 'directory',
+ 'directory-namestring', 'disassemble', 'documentation', 'dpb',
+ 'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
+ 'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
+ 'enough-namestring', 'ensure-directories-exist',
+ 'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
+ 'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
+ 'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
+ 'file-error-pathname', 'file-length', 'file-namestring',
+ 'file-position', 'file-string-length', 'file-write-date',
+ 'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
+ 'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
+ 'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
+ 'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
+ 'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
+ 'fround', 'ftruncate', 'funcall', 'function-keywords',
+ 'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
+ 'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
+ 'gethash', 'get-internal-real-time', 'get-internal-run-time',
+ 'get-macro-character', 'get-output-stream-string', 'get-properties',
+ 'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
+ 'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
+ 'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
+ 'host-namestring', 'identity', 'imagpart', 'import',
+ 'initialize-instance', 'input-stream-p', 'inspect',
+ 'integer-decode-float', 'integer-length', 'integerp',
+ 'interactive-stream-p', 'intern', 'intersection',
+ 'invalid-method-error', 'invoke-debugger', 'invoke-restart',
+ 'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
+ 'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
+ 'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
+ 'listen', 'list-length', 'listp', 'load',
+ 'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
+ 'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
+ 'logical-pathname-translations', 'logior', 'lognand', 'lognor',
+ 'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
+ 'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
+ 'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
+ 'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
+ 'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
+ 'make-instance', 'make-instances-obsolete', 'make-list',
+ 'make-load-form', 'make-load-form-saving-slots', 'make-package',
+ 'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
+ 'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
+ 'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
+ 'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
+ 'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
+ 'merge', 'merge-pathnames', 'method-combination-error',
+ 'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
+ 'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
+ 'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
+ 'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
+ 'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
+ 'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
+ 'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
+ 'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
+ 'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
+ 'package-name', 'package-nicknames', 'packagep',
+ 'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
+ 'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
+ 'pathname-device', 'pathname-directory', 'pathname-host',
+ 'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
+ 'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
+ 'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
+ 'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
+ 'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
+ 'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
+ 'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
+ 'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
+ 'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
+ 'read-from-string', 'read-line', 'read-preserving-whitespace',
+ 'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
+ 'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
+ 'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
+ 'remprop', 'rename-file', 'rename-package', 'replace', 'require',
+ 'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
+ 'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
+ 'search', 'second', 'set', 'set-difference',
+ 'set-dispatch-macro-character', 'set-exclusive-or',
+ 'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
+ 'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
+ 'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
+ 'simple-condition-format-arguments', 'simple-condition-format-control',
+ 'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
+ 'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
+ 'slot-unbound', 'slot-value', 'software-type', 'software-version',
+ 'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
+ 'standard-char-p', 'store-value', 'stream-element-type',
+ 'stream-error-stream', 'stream-external-format', 'streamp', 'string',
+ 'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
+ 'string-capitalize', 'string-downcase', 'string-equal',
+ 'string-greaterp', 'string-left-trim', 'string-lessp',
+ 'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
+ 'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
+ 'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
+    'substitute', 'substitute-if', 'substitute-if-not', 'subtypep', 'svref',
+ 'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
+ 'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
+ 'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
+ 'translate-logical-pathname', 'translate-pathname', 'tree-equal',
+ 'truename', 'truncate', 'two-way-stream-input-stream',
+ 'two-way-stream-output-stream', 'type-error-datum',
+ 'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
+ 'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
+ 'update-instance-for-different-class',
+ 'update-instance-for-redefined-class', 'upgraded-array-element-type',
+ 'upgraded-complex-part-type', 'upper-case-p', 'use-package',
+ 'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
+ 'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
+ 'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
+ 'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
+ 'y-or-n-p', 'zerop',
+}
+
+SPECIAL_FORMS = {
+ 'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
+ 'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
+ 'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
+ 'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
+ 'unwind-protect',
+}
+
+MACROS = {
+ 'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
+ 'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
+ 'define-compiler-macro', 'define-condition', 'define-method-combination',
+ 'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
+ 'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
+ 'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
+ 'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
+ 'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
+ 'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
+ 'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
+ 'multiple-value-setq', 'nth-value', 'or', 'pop',
+ 'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
+ 'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
+ 'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
+ 'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
+ 'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
+ 'with-condition-restarts', 'with-hash-table-iterator',
+ 'with-input-from-string', 'with-open-file', 'with-open-stream',
+ 'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
+ 'with-slots', 'with-standard-io-syntax',
+}
+
+LAMBDA_LIST_KEYWORDS = {
+ '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
+ '&rest', '&whole',
+}
+
+DECLARATIONS = {
+ 'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
+ 'ignorable', 'notinline', 'type',
+}
+
+BUILTIN_TYPES = {
+ 'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
+ 'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
+ 'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
+ 'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
+ 'simple-vector', 'standard-char', 'unsigned-byte',
+
+ # Condition Types
+ 'arithmetic-error', 'cell-error', 'condition', 'control-error',
+ 'division-by-zero', 'end-of-file', 'error', 'file-error',
+ 'floating-point-inexact', 'floating-point-overflow',
+ 'floating-point-underflow', 'floating-point-invalid-operation',
+ 'parse-error', 'package-error', 'print-not-readable', 'program-error',
+ 'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
+ 'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
+ 'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
+ 'undefined-function', 'warning',
+}
+
+BUILTIN_CLASSES = {
+ 'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
+ 'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
+ 'file-stream', 'float', 'function', 'generic-function', 'hash-table',
+ 'integer', 'list', 'logical-pathname', 'method-combination', 'method',
+ 'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
+ 'real', 'random-state', 'restart', 'sequence', 'standard-class',
+ 'standard-generic-function', 'standard-method', 'standard-object',
+ 'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
+ 'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
+}
diff --git a/pygments/lexers/_cocoa_builtins.py b/pygments/lexers/_cocoa_builtins.py
new file mode 100644
index 0000000..5171139
--- /dev/null
+++ b/pygments/lexers/_cocoa_builtins.py
@@ -0,0 +1,75 @@
+"""
+ pygments.lexers._cocoa_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file defines a set of types used across Cocoa frameworks from Apple.
+ There is a list of @interfaces, @protocols and some other (structs, unions)
+
+ File may be also used as standalone generator for above.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+COCOA_INTERFACES = {'AAAttribution', 'ABNewPersonViewController', 'ABPeoplePickerNavigationController', 'ABPersonViewController', 'ABUnknownPersonViewController', 'ACAccount', 'ACAccountCredential', 'ACAccountStore', 'ACAccountType', 'ADBannerView', 'ADClient', 'ADInterstitialAd', 'ADInterstitialAdPresentationViewController', 'AEAssessmentConfiguration', 'AEAssessmentSession', 'ALAsset', 'ALAssetRepresentation', 'ALAssetsFilter', 'ALAssetsGroup', 'ALAssetsLibrary', 'APActivationPayload', 'ARAnchor', 'ARAppClipCodeAnchor', 'ARBody2D', 'ARBodyAnchor', 'ARBodyTrackingConfiguration', 'ARCamera', 'ARCoachingOverlayView', 'ARCollaborationData', 'ARConfiguration', 'ARDepthData', 'ARDirectionalLightEstimate', 'AREnvironmentProbeAnchor', 'ARFaceAnchor', 'ARFaceGeometry', 'ARFaceTrackingConfiguration', 'ARFrame', 'ARGeoAnchor', 'ARGeoTrackingConfiguration', 'ARGeoTrackingStatus', 'ARGeometryElement', 'ARGeometrySource', 'ARHitTestResult', 'ARImageAnchor', 'ARImageTrackingConfiguration', 'ARLightEstimate', 'ARMatteGenerator', 'ARMeshAnchor', 'ARMeshGeometry', 'ARObjectAnchor', 'ARObjectScanningConfiguration', 'AROrientationTrackingConfiguration', 'ARParticipantAnchor', 'ARPlaneAnchor', 'ARPlaneGeometry', 'ARPointCloud', 'ARPositionalTrackingConfiguration', 'ARQuickLookPreviewItem', 'ARRaycastQuery', 'ARRaycastResult', 'ARReferenceImage', 'ARReferenceObject', 'ARSCNFaceGeometry', 'ARSCNPlaneGeometry', 'ARSCNView', 'ARSKView', 'ARSession', 'ARSkeleton', 'ARSkeleton2D', 'ARSkeleton3D', 'ARSkeletonDefinition', 'ARTrackedRaycast', 'ARVideoFormat', 'ARView', 'ARWorldMap', 'ARWorldTrackingConfiguration', 'ASAccountAuthenticationModificationController', 'ASAccountAuthenticationModificationExtensionContext', 'ASAccountAuthenticationModificationReplacePasswordWithSignInWithAppleRequest', 'ASAccountAuthenticationModificationRequest', 'ASAccountAuthenticationModificationUpgradePasswordToStrongPasswordRequest', 'ASAccountAuthenticationModificationViewController', 'ASAuthorization', 
'ASAuthorizationAppleIDButton', 'ASAuthorizationAppleIDCredential', 'ASAuthorizationAppleIDProvider', 'ASAuthorizationAppleIDRequest', 'ASAuthorizationController', 'ASAuthorizationOpenIDRequest', 'ASAuthorizationPasswordProvider', 'ASAuthorizationPasswordRequest', 'ASAuthorizationProviderExtensionAuthorizationRequest', 'ASAuthorizationRequest', 'ASAuthorizationSingleSignOnCredential', 'ASAuthorizationSingleSignOnProvider', 'ASAuthorizationSingleSignOnRequest', 'ASCredentialIdentityStore', 'ASCredentialIdentityStoreState', 'ASCredentialProviderExtensionContext', 'ASCredentialProviderViewController', 'ASCredentialServiceIdentifier', 'ASIdentifierManager', 'ASPasswordCredential', 'ASPasswordCredentialIdentity', 'ASWebAuthenticationSession', 'ASWebAuthenticationSessionRequest', 'ASWebAuthenticationSessionWebBrowserSessionManager', 'ATTrackingManager', 'AUAudioUnit', 'AUAudioUnitBus', 'AUAudioUnitBusArray', 'AUAudioUnitPreset', 'AUAudioUnitV2Bridge', 'AUAudioUnitViewConfiguration', 'AUParameter', 'AUParameterGroup', 'AUParameterNode', 'AUParameterTree', 'AUViewController', 'AVAggregateAssetDownloadTask', 'AVAsset', 'AVAssetCache', 'AVAssetDownloadStorageManagementPolicy', 'AVAssetDownloadStorageManager', 'AVAssetDownloadTask', 'AVAssetDownloadURLSession', 'AVAssetExportSession', 'AVAssetImageGenerator', 'AVAssetReader', 'AVAssetReaderAudioMixOutput', 'AVAssetReaderOutput', 'AVAssetReaderOutputMetadataAdaptor', 'AVAssetReaderSampleReferenceOutput', 'AVAssetReaderTrackOutput', 'AVAssetReaderVideoCompositionOutput', 'AVAssetResourceLoader', 'AVAssetResourceLoadingContentInformationRequest', 'AVAssetResourceLoadingDataRequest', 'AVAssetResourceLoadingRequest', 'AVAssetResourceLoadingRequestor', 'AVAssetResourceRenewalRequest', 'AVAssetSegmentReport', 'AVAssetSegmentReportSampleInformation', 'AVAssetSegmentTrackReport', 'AVAssetTrack', 'AVAssetTrackGroup', 'AVAssetTrackSegment', 'AVAssetWriter', 'AVAssetWriterInput', 'AVAssetWriterInputGroup', 
'AVAssetWriterInputMetadataAdaptor', 'AVAssetWriterInputPassDescription', 'AVAssetWriterInputPixelBufferAdaptor', 'AVAsynchronousCIImageFilteringRequest', 'AVAsynchronousVideoCompositionRequest', 'AVAudioMix', 'AVAudioMixInputParameters', 'AVAudioSession', 'AVCameraCalibrationData', 'AVCaptureAudioChannel', 'AVCaptureAudioDataOutput', 'AVCaptureAudioFileOutput', 'AVCaptureAudioPreviewOutput', 'AVCaptureAutoExposureBracketedStillImageSettings', 'AVCaptureBracketedStillImageSettings', 'AVCaptureConnection', 'AVCaptureDataOutputSynchronizer', 'AVCaptureDepthDataOutput', 'AVCaptureDevice', 'AVCaptureDeviceDiscoverySession', 'AVCaptureDeviceFormat', 'AVCaptureDeviceInput', 'AVCaptureDeviceInputSource', 'AVCaptureFileOutput', 'AVCaptureInput', 'AVCaptureInputPort', 'AVCaptureManualExposureBracketedStillImageSettings', 'AVCaptureMetadataInput', 'AVCaptureMetadataOutput', 'AVCaptureMovieFileOutput', 'AVCaptureMultiCamSession', 'AVCaptureOutput', 'AVCapturePhoto', 'AVCapturePhotoBracketSettings', 'AVCapturePhotoOutput', 'AVCapturePhotoSettings', 'AVCaptureResolvedPhotoSettings', 'AVCaptureScreenInput', 'AVCaptureSession', 'AVCaptureStillImageOutput', 'AVCaptureSynchronizedData', 'AVCaptureSynchronizedDataCollection', 'AVCaptureSynchronizedDepthData', 'AVCaptureSynchronizedMetadataObjectData', 'AVCaptureSynchronizedSampleBufferData', 'AVCaptureSystemPressureState', 'AVCaptureVideoDataOutput', 'AVCaptureVideoPreviewLayer', 'AVComposition', 'AVCompositionTrack', 'AVCompositionTrackFormatDescriptionReplacement', 'AVCompositionTrackSegment', 'AVContentKeyRequest', 'AVContentKeyResponse', 'AVContentKeySession', 'AVDateRangeMetadataGroup', 'AVDepthData', 'AVDisplayCriteria', 'AVFragmentedAsset', 'AVFragmentedAssetMinder', 'AVFragmentedAssetTrack', 'AVFragmentedMovie', 'AVFragmentedMovieMinder', 'AVFragmentedMovieTrack', 'AVFrameRateRange', 'AVMediaDataStorage', 'AVMediaSelection', 'AVMediaSelectionGroup', 'AVMediaSelectionOption', 'AVMetadataBodyObject', 'AVMetadataCatBodyObject', 
'AVMetadataDogBodyObject', 'AVMetadataFaceObject', 'AVMetadataGroup', 'AVMetadataHumanBodyObject', 'AVMetadataItem', 'AVMetadataItemFilter', 'AVMetadataItemValueRequest', 'AVMetadataMachineReadableCodeObject', 'AVMetadataObject', 'AVMetadataSalientObject', 'AVMovie', 'AVMovieTrack', 'AVMutableAssetDownloadStorageManagementPolicy', 'AVMutableAudioMix', 'AVMutableAudioMixInputParameters', 'AVMutableComposition', 'AVMutableCompositionTrack', 'AVMutableDateRangeMetadataGroup', 'AVMutableMediaSelection', 'AVMutableMetadataItem', 'AVMutableMovie', 'AVMutableMovieTrack', 'AVMutableTimedMetadataGroup', 'AVMutableVideoComposition', 'AVMutableVideoCompositionInstruction', 'AVMutableVideoCompositionLayerInstruction', 'AVOutputSettingsAssistant', 'AVPersistableContentKeyRequest', 'AVPictureInPictureController', 'AVPlayer', 'AVPlayerItem', 'AVPlayerItemAccessLog', 'AVPlayerItemAccessLogEvent', 'AVPlayerItemErrorLog', 'AVPlayerItemErrorLogEvent', 'AVPlayerItemLegibleOutput', 'AVPlayerItemMediaDataCollector', 'AVPlayerItemMetadataCollector', 'AVPlayerItemMetadataOutput', 'AVPlayerItemOutput', 'AVPlayerItemTrack', 'AVPlayerItemVideoOutput', 'AVPlayerLayer', 'AVPlayerLooper', 'AVPlayerMediaSelectionCriteria', 'AVPlayerViewController', 'AVPortraitEffectsMatte', 'AVQueuePlayer', 'AVRouteDetector', 'AVRoutePickerView', 'AVSampleBufferAudioRenderer', 'AVSampleBufferDisplayLayer', 'AVSampleBufferRenderSynchronizer', 'AVSemanticSegmentationMatte', 'AVSynchronizedLayer', 'AVTextStyleRule', 'AVTimedMetadataGroup', 'AVURLAsset', 'AVVideoComposition', 'AVVideoCompositionCoreAnimationTool', 'AVVideoCompositionInstruction', 'AVVideoCompositionLayerInstruction', 'AVVideoCompositionRenderContext', 'AVVideoCompositionRenderHint', 'AXCustomContent', 'BCChatAction', 'BCChatButton', 'BGAppRefreshTask', 'BGAppRefreshTaskRequest', 'BGProcessingTask', 'BGProcessingTaskRequest', 'BGTask', 'BGTaskRequest', 'BGTaskScheduler', 'CAAnimation', 'CAAnimationGroup', 'CABTMIDICentralViewController', 
'CABTMIDILocalPeripheralViewController', 'CABasicAnimation', 'CADisplayLink', 'CAEAGLLayer', 'CAEmitterCell', 'CAEmitterLayer', 'CAGradientLayer', 'CAInterAppAudioSwitcherView', 'CAInterAppAudioTransportView', 'CAKeyframeAnimation', 'CALayer', 'CAMediaTimingFunction', 'CAMetalLayer', 'CAPropertyAnimation', 'CAReplicatorLayer', 'CAScrollLayer', 'CAShapeLayer', 'CASpringAnimation', 'CATextLayer', 'CATiledLayer', 'CATransaction', 'CATransformLayer', 'CATransition', 'CAValueFunction', 'CBATTRequest', 'CBAttribute', 'CBCentral', 'CBCentralManager', 'CBCharacteristic', 'CBDescriptor', 'CBL2CAPChannel', 'CBManager', 'CBMutableCharacteristic', 'CBMutableDescriptor', 'CBMutableService', 'CBPeer', 'CBPeripheral', 'CBPeripheralManager', 'CBService', 'CBUUID', 'CHHapticDynamicParameter', 'CHHapticEngine', 'CHHapticEvent', 'CHHapticEventParameter', 'CHHapticParameterCurve', 'CHHapticParameterCurveControlPoint', 'CHHapticPattern', 'CIAztecCodeDescriptor', 'CIBarcodeDescriptor', 'CIBlendKernel', 'CIColor', 'CIColorKernel', 'CIContext', 'CIDataMatrixCodeDescriptor', 'CIDetector', 'CIFaceFeature', 'CIFeature', 'CIFilter', 'CIFilterGenerator', 'CIFilterShape', 'CIImage', 'CIImageAccumulator', 'CIImageProcessorKernel', 'CIKernel', 'CIPDF417CodeDescriptor', 'CIPlugIn', 'CIQRCodeDescriptor', 'CIQRCodeFeature', 'CIRectangleFeature', 'CIRenderDestination', 'CIRenderInfo', 'CIRenderTask', 'CISampler', 'CITextFeature', 'CIVector', 'CIWarpKernel', 'CKAcceptSharesOperation', 'CKAsset', 'CKContainer', 'CKDatabase', 'CKDatabaseNotification', 'CKDatabaseOperation', 'CKDatabaseSubscription', 'CKDiscoverAllUserIdentitiesOperation', 'CKDiscoverUserIdentitiesOperation', 'CKFetchDatabaseChangesOperation', 'CKFetchNotificationChangesOperation', 'CKFetchRecordChangesOperation', 'CKFetchRecordZoneChangesConfiguration', 'CKFetchRecordZoneChangesOperation', 'CKFetchRecordZoneChangesOptions', 'CKFetchRecordZonesOperation', 'CKFetchRecordsOperation', 'CKFetchShareMetadataOperation', 
'CKFetchShareParticipantsOperation', 'CKFetchSubscriptionsOperation', 'CKFetchWebAuthTokenOperation', 'CKLocationSortDescriptor', 'CKMarkNotificationsReadOperation', 'CKModifyBadgeOperation', 'CKModifyRecordZonesOperation', 'CKModifyRecordsOperation', 'CKModifySubscriptionsOperation', 'CKNotification', 'CKNotificationID', 'CKNotificationInfo', 'CKOperation', 'CKOperationConfiguration', 'CKOperationGroup', 'CKQuery', 'CKQueryCursor', 'CKQueryNotification', 'CKQueryOperation', 'CKQuerySubscription', 'CKRecord', 'CKRecordID', 'CKRecordZone', 'CKRecordZoneID', 'CKRecordZoneNotification', 'CKRecordZoneSubscription', 'CKReference', 'CKServerChangeToken', 'CKShare', 'CKShareMetadata', 'CKShareParticipant', 'CKSubscription', 'CKUserIdentity', 'CKUserIdentityLookupInfo', 'CLBeacon', 'CLBeaconIdentityConstraint', 'CLBeaconRegion', 'CLCircularRegion', 'CLFloor', 'CLGeocoder', 'CLHeading', 'CLKComplication', 'CLKComplicationDescriptor', 'CLKComplicationServer', 'CLKComplicationTemplate', 'CLKComplicationTemplateCircularSmallRingImage', 'CLKComplicationTemplateCircularSmallRingText', 'CLKComplicationTemplateCircularSmallSimpleImage', 'CLKComplicationTemplateCircularSmallSimpleText', 'CLKComplicationTemplateCircularSmallStackImage', 'CLKComplicationTemplateCircularSmallStackText', 'CLKComplicationTemplateExtraLargeColumnsText', 'CLKComplicationTemplateExtraLargeRingImage', 'CLKComplicationTemplateExtraLargeRingText', 'CLKComplicationTemplateExtraLargeSimpleImage', 'CLKComplicationTemplateExtraLargeSimpleText', 'CLKComplicationTemplateExtraLargeStackImage', 'CLKComplicationTemplateExtraLargeStackText', 'CLKComplicationTemplateGraphicBezelCircularText', 'CLKComplicationTemplateGraphicCircular', 'CLKComplicationTemplateGraphicCircularClosedGaugeImage', 'CLKComplicationTemplateGraphicCircularClosedGaugeText', 'CLKComplicationTemplateGraphicCircularImage', 'CLKComplicationTemplateGraphicCircularOpenGaugeImage', 'CLKComplicationTemplateGraphicCircularOpenGaugeRangeText', 
'CLKComplicationTemplateGraphicCircularOpenGaugeSimpleText', 'CLKComplicationTemplateGraphicCircularStackImage', 'CLKComplicationTemplateGraphicCircularStackText', 'CLKComplicationTemplateGraphicCornerCircularImage', 'CLKComplicationTemplateGraphicCornerGaugeImage', 'CLKComplicationTemplateGraphicCornerGaugeText', 'CLKComplicationTemplateGraphicCornerStackText', 'CLKComplicationTemplateGraphicCornerTextImage', 'CLKComplicationTemplateGraphicExtraLargeCircular', 'CLKComplicationTemplateGraphicExtraLargeCircularClosedGaugeImage', 'CLKComplicationTemplateGraphicExtraLargeCircularClosedGaugeText', 'CLKComplicationTemplateGraphicExtraLargeCircularImage', 'CLKComplicationTemplateGraphicExtraLargeCircularOpenGaugeImage', 'CLKComplicationTemplateGraphicExtraLargeCircularOpenGaugeRangeText', 'CLKComplicationTemplateGraphicExtraLargeCircularOpenGaugeSimpleText', 'CLKComplicationTemplateGraphicExtraLargeCircularStackImage', 'CLKComplicationTemplateGraphicExtraLargeCircularStackText', 'CLKComplicationTemplateGraphicRectangularFullImage', 'CLKComplicationTemplateGraphicRectangularLargeImage', 'CLKComplicationTemplateGraphicRectangularStandardBody', 'CLKComplicationTemplateGraphicRectangularTextGauge', 'CLKComplicationTemplateModularLargeColumns', 'CLKComplicationTemplateModularLargeStandardBody', 'CLKComplicationTemplateModularLargeTable', 'CLKComplicationTemplateModularLargeTallBody', 'CLKComplicationTemplateModularSmallColumnsText', 'CLKComplicationTemplateModularSmallRingImage', 'CLKComplicationTemplateModularSmallRingText', 'CLKComplicationTemplateModularSmallSimpleImage', 'CLKComplicationTemplateModularSmallSimpleText', 'CLKComplicationTemplateModularSmallStackImage', 'CLKComplicationTemplateModularSmallStackText', 'CLKComplicationTemplateUtilitarianLargeFlat', 'CLKComplicationTemplateUtilitarianSmallFlat', 'CLKComplicationTemplateUtilitarianSmallRingImage', 'CLKComplicationTemplateUtilitarianSmallRingText', 'CLKComplicationTemplateUtilitarianSmallSquare', 
'CLKComplicationTimelineEntry', 'CLKDateTextProvider', 'CLKFullColorImageProvider', 'CLKGaugeProvider', 'CLKImageProvider', 'CLKRelativeDateTextProvider', 'CLKSimpleGaugeProvider', 'CLKSimpleTextProvider', 'CLKTextProvider', 'CLKTimeIntervalGaugeProvider', 'CLKTimeIntervalTextProvider', 'CLKTimeTextProvider', 'CLKWatchFaceLibrary', 'CLLocation', 'CLLocationManager', 'CLPlacemark', 'CLRegion', 'CLSActivity', 'CLSActivityItem', 'CLSBinaryItem', 'CLSContext', 'CLSDataStore', 'CLSObject', 'CLSProgressReportingCapability', 'CLSQuantityItem', 'CLSScoreItem', 'CLVisit', 'CMAccelerometerData', 'CMAltimeter', 'CMAltitudeData', 'CMAttitude', 'CMDeviceMotion', 'CMDyskineticSymptomResult', 'CMFallDetectionEvent', 'CMFallDetectionManager', 'CMGyroData', 'CMHeadphoneMotionManager', 'CMLogItem', 'CMMagnetometerData', 'CMMotionActivity', 'CMMotionActivityManager', 'CMMotionManager', 'CMMovementDisorderManager', 'CMPedometer', 'CMPedometerData', 'CMPedometerEvent', 'CMRecordedAccelerometerData', 'CMRecordedRotationRateData', 'CMRotationRateData', 'CMSensorDataList', 'CMSensorRecorder', 'CMStepCounter', 'CMTremorResult', 'CNChangeHistoryAddContactEvent', 'CNChangeHistoryAddGroupEvent', 'CNChangeHistoryAddMemberToGroupEvent', 'CNChangeHistoryAddSubgroupToGroupEvent', 'CNChangeHistoryDeleteContactEvent', 'CNChangeHistoryDeleteGroupEvent', 'CNChangeHistoryDropEverythingEvent', 'CNChangeHistoryEvent', 'CNChangeHistoryFetchRequest', 'CNChangeHistoryRemoveMemberFromGroupEvent', 'CNChangeHistoryRemoveSubgroupFromGroupEvent', 'CNChangeHistoryUpdateContactEvent', 'CNChangeHistoryUpdateGroupEvent', 'CNContact', 'CNContactFetchRequest', 'CNContactFormatter', 'CNContactPickerViewController', 'CNContactProperty', 'CNContactRelation', 'CNContactStore', 'CNContactVCardSerialization', 'CNContactViewController', 'CNContactsUserDefaults', 'CNContainer', 'CNFetchRequest', 'CNFetchResult', 'CNGroup', 'CNInstantMessageAddress', 'CNLabeledValue', 'CNMutableContact', 'CNMutableGroup', 
'CNMutablePostalAddress', 'CNPhoneNumber', 'CNPostalAddress', 'CNPostalAddressFormatter', 'CNSaveRequest', 'CNSocialProfile', 'CPActionSheetTemplate', 'CPAlertAction', 'CPAlertTemplate', 'CPBarButton', 'CPButton', 'CPContact', 'CPContactCallButton', 'CPContactDirectionsButton', 'CPContactMessageButton', 'CPContactTemplate', 'CPDashboardButton', 'CPDashboardController', 'CPGridButton', 'CPGridTemplate', 'CPImageSet', 'CPInformationItem', 'CPInformationRatingItem', 'CPInformationTemplate', 'CPInterfaceController', 'CPListImageRowItem', 'CPListItem', 'CPListSection', 'CPListTemplate', 'CPManeuver', 'CPMapButton', 'CPMapTemplate', 'CPMessageComposeBarButton', 'CPMessageListItem', 'CPMessageListItemLeadingConfiguration', 'CPMessageListItemTrailingConfiguration', 'CPNavigationAlert', 'CPNavigationSession', 'CPNowPlayingAddToLibraryButton', 'CPNowPlayingButton', 'CPNowPlayingImageButton', 'CPNowPlayingMoreButton', 'CPNowPlayingPlaybackRateButton', 'CPNowPlayingRepeatButton', 'CPNowPlayingShuffleButton', 'CPNowPlayingTemplate', 'CPPointOfInterest', 'CPPointOfInterestTemplate', 'CPRouteChoice', 'CPSearchTemplate', 'CPSessionConfiguration', 'CPTabBarTemplate', 'CPTemplate', 'CPTemplateApplicationDashboardScene', 'CPTemplateApplicationScene', 'CPTextButton', 'CPTravelEstimates', 'CPTrip', 'CPTripPreviewTextConfiguration', 'CPVoiceControlState', 'CPVoiceControlTemplate', 'CPWindow', 'CSCustomAttributeKey', 'CSIndexExtensionRequestHandler', 'CSLocalizedString', 'CSPerson', 'CSSearchQuery', 'CSSearchableIndex', 'CSSearchableItem', 'CSSearchableItemAttributeSet', 'CTCall', 'CTCallCenter', 'CTCarrier', 'CTCellularData', 'CTCellularPlanProvisioning', 'CTCellularPlanProvisioningRequest', 'CTSubscriber', 'CTSubscriberInfo', 'CTTelephonyNetworkInfo', 'CXAction', 'CXAnswerCallAction', 'CXCall', 'CXCallAction', 'CXCallController', 'CXCallDirectoryExtensionContext', 'CXCallDirectoryManager', 'CXCallDirectoryProvider', 'CXCallObserver', 'CXCallUpdate', 'CXEndCallAction', 'CXHandle', 
'CXPlayDTMFCallAction', 'CXProvider', 'CXProviderConfiguration', 'CXSetGroupCallAction', 'CXSetHeldCallAction', 'CXSetMutedCallAction', 'CXStartCallAction', 'CXTransaction', 'DCAppAttestService', 'DCDevice', 'EAAccessory', 'EAAccessoryManager', 'EAGLContext', 'EAGLSharegroup', 'EASession', 'EAWiFiUnconfiguredAccessory', 'EAWiFiUnconfiguredAccessoryBrowser', 'EKAlarm', 'EKCalendar', 'EKCalendarChooser', 'EKCalendarItem', 'EKEvent', 'EKEventEditViewController', 'EKEventStore', 'EKEventViewController', 'EKObject', 'EKParticipant', 'EKRecurrenceDayOfWeek', 'EKRecurrenceEnd', 'EKRecurrenceRule', 'EKReminder', 'EKSource', 'EKStructuredLocation', 'ENExposureConfiguration', 'ENExposureDaySummary', 'ENExposureDetectionSummary', 'ENExposureInfo', 'ENExposureSummaryItem', 'ENExposureWindow', 'ENManager', 'ENScanInstance', 'ENTemporaryExposureKey', 'EntityRotationGestureRecognizer', 'EntityScaleGestureRecognizer', 'EntityTranslationGestureRecognizer', 'FPUIActionExtensionContext', 'FPUIActionExtensionViewController', 'GCColor', 'GCController', 'GCControllerAxisInput', 'GCControllerButtonInput', 'GCControllerDirectionPad', 'GCControllerElement', 'GCControllerTouchpad', 'GCDeviceBattery', 'GCDeviceCursor', 'GCDeviceHaptics', 'GCDeviceLight', 'GCDirectionalGamepad', 'GCDualShockGamepad', 'GCEventViewController', 'GCExtendedGamepad', 'GCExtendedGamepadSnapshot', 'GCGamepad', 'GCGamepadSnapshot', 'GCKeyboard', 'GCKeyboardInput', 'GCMicroGamepad', 'GCMicroGamepadSnapshot', 'GCMotion', 'GCMouse', 'GCMouseInput', 'GCPhysicalInputProfile', 'GCXboxGamepad', 'GKARC4RandomSource', 'GKAccessPoint', 'GKAchievement', 'GKAchievementChallenge', 'GKAchievementDescription', 'GKAchievementViewController', 'GKAgent', 'GKAgent2D', 'GKAgent3D', 'GKBasePlayer', 'GKBehavior', 'GKBillowNoiseSource', 'GKChallenge', 'GKChallengeEventHandler', 'GKCheckerboardNoiseSource', 'GKCircleObstacle', 'GKCloudPlayer', 'GKCoherentNoiseSource', 'GKComponent', 'GKComponentSystem', 'GKCompositeBehavior', 
'GKConstantNoiseSource', 'GKCylindersNoiseSource', 'GKDecisionNode', 'GKDecisionTree', 'GKEntity', 'GKFriendRequestComposeViewController', 'GKGameCenterViewController', 'GKGameSession', 'GKGameSessionSharingViewController', 'GKGaussianDistribution', 'GKGoal', 'GKGraph', 'GKGraphNode', 'GKGraphNode2D', 'GKGraphNode3D', 'GKGridGraph', 'GKGridGraphNode', 'GKInvite', 'GKLeaderboard', 'GKLeaderboardEntry', 'GKLeaderboardScore', 'GKLeaderboardSet', 'GKLeaderboardViewController', 'GKLinearCongruentialRandomSource', 'GKLocalPlayer', 'GKMatch', 'GKMatchRequest', 'GKMatchmaker', 'GKMatchmakerViewController', 'GKMersenneTwisterRandomSource', 'GKMeshGraph', 'GKMinmaxStrategist', 'GKMonteCarloStrategist', 'GKNSPredicateRule', 'GKNoise', 'GKNoiseMap', 'GKNoiseSource', 'GKNotificationBanner', 'GKObstacle', 'GKObstacleGraph', 'GKOctree', 'GKOctreeNode', 'GKPath', 'GKPeerPickerController', 'GKPerlinNoiseSource', 'GKPlayer', 'GKPolygonObstacle', 'GKQuadtree', 'GKQuadtreeNode', 'GKRTree', 'GKRandomDistribution', 'GKRandomSource', 'GKRidgedNoiseSource', 'GKRule', 'GKRuleSystem', 'GKSCNNodeComponent', 'GKSKNodeComponent', 'GKSavedGame', 'GKScene', 'GKScore', 'GKScoreChallenge', 'GKSession', 'GKShuffledDistribution', 'GKSphereObstacle', 'GKSpheresNoiseSource', 'GKState', 'GKStateMachine', 'GKTurnBasedEventHandler', 'GKTurnBasedExchangeReply', 'GKTurnBasedMatch', 'GKTurnBasedMatchmakerViewController', 'GKTurnBasedParticipant', 'GKVoiceChat', 'GKVoiceChatService', 'GKVoronoiNoiseSource', 'GLKBaseEffect', 'GLKEffectProperty', 'GLKEffectPropertyFog', 'GLKEffectPropertyLight', 'GLKEffectPropertyMaterial', 'GLKEffectPropertyTexture', 'GLKEffectPropertyTransform', 'GLKMesh', 'GLKMeshBuffer', 'GLKMeshBufferAllocator', 'GLKReflectionMapEffect', 'GLKSkyboxEffect', 'GLKSubmesh', 'GLKTextureInfo', 'GLKTextureLoader', 'GLKView', 'GLKViewController', 'HKActivityMoveModeObject', 'HKActivityRingView', 'HKActivitySummary', 'HKActivitySummaryQuery', 'HKActivitySummaryType', 'HKAnchoredObjectQuery', 
'HKAudiogramSample', 'HKAudiogramSampleType', 'HKAudiogramSensitivityPoint', 'HKBiologicalSexObject', 'HKBloodTypeObject', 'HKCDADocument', 'HKCDADocumentSample', 'HKCategorySample', 'HKCategoryType', 'HKCharacteristicType', 'HKClinicalRecord', 'HKClinicalType', 'HKCorrelation', 'HKCorrelationQuery', 'HKCorrelationType', 'HKCumulativeQuantitySample', 'HKCumulativeQuantitySeriesSample', 'HKDeletedObject', 'HKDevice', 'HKDiscreteQuantitySample', 'HKDocumentQuery', 'HKDocumentSample', 'HKDocumentType', 'HKElectrocardiogram', 'HKElectrocardiogramQuery', 'HKElectrocardiogramType', 'HKElectrocardiogramVoltageMeasurement', 'HKFHIRResource', 'HKFHIRVersion', 'HKFitzpatrickSkinTypeObject', 'HKHealthStore', 'HKHeartbeatSeriesBuilder', 'HKHeartbeatSeriesQuery', 'HKHeartbeatSeriesSample', 'HKLiveWorkoutBuilder', 'HKLiveWorkoutDataSource', 'HKObject', 'HKObjectType', 'HKObserverQuery', 'HKQuantity', 'HKQuantitySample', 'HKQuantitySeriesSampleBuilder', 'HKQuantitySeriesSampleQuery', 'HKQuantityType', 'HKQuery', 'HKQueryAnchor', 'HKSample', 'HKSampleQuery', 'HKSampleType', 'HKSeriesBuilder', 'HKSeriesSample', 'HKSeriesType', 'HKSource', 'HKSourceQuery', 'HKSourceRevision', 'HKStatistics', 'HKStatisticsCollection', 'HKStatisticsCollectionQuery', 'HKStatisticsQuery', 'HKUnit', 'HKWheelchairUseObject', 'HKWorkout', 'HKWorkoutBuilder', 'HKWorkoutConfiguration', 'HKWorkoutEvent', 'HKWorkoutRoute', 'HKWorkoutRouteBuilder', 'HKWorkoutRouteQuery', 'HKWorkoutSession', 'HKWorkoutType', 'HMAccessControl', 'HMAccessory', 'HMAccessoryBrowser', 'HMAccessoryCategory', 'HMAccessoryOwnershipToken', 'HMAccessoryProfile', 'HMAccessorySetupPayload', 'HMAction', 'HMActionSet', 'HMAddAccessoryRequest', 'HMCalendarEvent', 'HMCameraAudioControl', 'HMCameraControl', 'HMCameraProfile', 'HMCameraSettingsControl', 'HMCameraSnapshot', 'HMCameraSnapshotControl', 'HMCameraSource', 'HMCameraStream', 'HMCameraStreamControl', 'HMCameraView', 'HMCharacteristic', 'HMCharacteristicEvent', 'HMCharacteristicMetadata', 
'HMCharacteristicThresholdRangeEvent', 'HMCharacteristicWriteAction', 'HMDurationEvent', 'HMEvent', 'HMEventTrigger', 'HMHome', 'HMHomeAccessControl', 'HMHomeManager', 'HMLocationEvent', 'HMMutableCalendarEvent', 'HMMutableCharacteristicEvent', 'HMMutableCharacteristicThresholdRangeEvent', 'HMMutableDurationEvent', 'HMMutableLocationEvent', 'HMMutablePresenceEvent', 'HMMutableSignificantTimeEvent', 'HMNetworkConfigurationProfile', 'HMNumberRange', 'HMPresenceEvent', 'HMRoom', 'HMService', 'HMServiceGroup', 'HMSignificantTimeEvent', 'HMTimeEvent', 'HMTimerTrigger', 'HMTrigger', 'HMUser', 'HMZone', 'ICCameraDevice', 'ICCameraFile', 'ICCameraFolder', 'ICCameraItem', 'ICDevice', 'ICDeviceBrowser', 'ICScannerBandData', 'ICScannerDevice', 'ICScannerFeature', 'ICScannerFeatureBoolean', 'ICScannerFeatureEnumeration', 'ICScannerFeatureRange', 'ICScannerFeatureTemplate', 'ICScannerFunctionalUnit', 'ICScannerFunctionalUnitDocumentFeeder', 'ICScannerFunctionalUnitFlatbed', 'ICScannerFunctionalUnitNegativeTransparency', 'ICScannerFunctionalUnitPositiveTransparency', 'ILCallClassificationRequest', 'ILCallCommunication', 'ILClassificationRequest', 'ILClassificationResponse', 'ILClassificationUIExtensionContext', 'ILClassificationUIExtensionViewController', 'ILCommunication', 'ILMessageClassificationRequest', 'ILMessageCommunication', 'ILMessageFilterExtension', 'ILMessageFilterExtensionContext', 'ILMessageFilterQueryRequest', 'ILMessageFilterQueryResponse', 'ILNetworkResponse', 'INAccountTypeResolutionResult', 'INActivateCarSignalIntent', 'INActivateCarSignalIntentResponse', 'INAddMediaIntent', 'INAddMediaIntentResponse', 'INAddMediaMediaDestinationResolutionResult', 'INAddMediaMediaItemResolutionResult', 'INAddTasksIntent', 'INAddTasksIntentResponse', 'INAddTasksTargetTaskListResolutionResult', 'INAddTasksTemporalEventTriggerResolutionResult', 'INAirline', 'INAirport', 'INAirportGate', 'INAppendToNoteIntent', 'INAppendToNoteIntentResponse', 'INBalanceAmount', 
'INBalanceTypeResolutionResult', 'INBillDetails', 'INBillPayee', 'INBillPayeeResolutionResult', 'INBillTypeResolutionResult', 'INBoatReservation', 'INBoatTrip', 'INBookRestaurantReservationIntent', 'INBookRestaurantReservationIntentResponse', 'INBooleanResolutionResult', 'INBusReservation', 'INBusTrip', 'INCallCapabilityResolutionResult', 'INCallDestinationTypeResolutionResult', 'INCallRecord', 'INCallRecordFilter', 'INCallRecordResolutionResult', 'INCallRecordTypeOptionsResolutionResult', 'INCallRecordTypeResolutionResult', 'INCancelRideIntent', 'INCancelRideIntentResponse', 'INCancelWorkoutIntent', 'INCancelWorkoutIntentResponse', 'INCar', 'INCarAirCirculationModeResolutionResult', 'INCarAudioSourceResolutionResult', 'INCarDefrosterResolutionResult', 'INCarHeadUnit', 'INCarSeatResolutionResult', 'INCarSignalOptionsResolutionResult', 'INCreateNoteIntent', 'INCreateNoteIntentResponse', 'INCreateTaskListIntent', 'INCreateTaskListIntentResponse', 'INCurrencyAmount', 'INCurrencyAmountResolutionResult', 'INDailyRoutineRelevanceProvider', 'INDateComponentsRange', 'INDateComponentsRangeResolutionResult', 'INDateComponentsResolutionResult', 'INDateRelevanceProvider', 'INDateSearchTypeResolutionResult', 'INDefaultCardTemplate', 'INDeleteTasksIntent', 'INDeleteTasksIntentResponse', 'INDeleteTasksTaskListResolutionResult', 'INDeleteTasksTaskResolutionResult', 'INDoubleResolutionResult', 'INEndWorkoutIntent', 'INEndWorkoutIntentResponse', 'INEnergyResolutionResult', 'INEnumResolutionResult', 'INExtension', 'INFile', 'INFileResolutionResult', 'INFlight', 'INFlightReservation', 'INGetAvailableRestaurantReservationBookingDefaultsIntent', 'INGetAvailableRestaurantReservationBookingDefaultsIntentResponse', 'INGetAvailableRestaurantReservationBookingsIntent', 'INGetAvailableRestaurantReservationBookingsIntentResponse', 'INGetCarLockStatusIntent', 'INGetCarLockStatusIntentResponse', 'INGetCarPowerLevelStatusIntent', 'INGetCarPowerLevelStatusIntentResponse', 
'INGetReservationDetailsIntent', 'INGetReservationDetailsIntentResponse', 'INGetRestaurantGuestIntent', 'INGetRestaurantGuestIntentResponse', 'INGetRideStatusIntent', 'INGetRideStatusIntentResponse', 'INGetUserCurrentRestaurantReservationBookingsIntent', 'INGetUserCurrentRestaurantReservationBookingsIntentResponse', 'INGetVisualCodeIntent', 'INGetVisualCodeIntentResponse', 'INImage', 'INImageNoteContent', 'INIntegerResolutionResult', 'INIntent', 'INIntentResolutionResult', 'INIntentResponse', 'INInteraction', 'INLengthResolutionResult', 'INListCarsIntent', 'INListCarsIntentResponse', 'INListRideOptionsIntent', 'INListRideOptionsIntentResponse', 'INLocationRelevanceProvider', 'INLocationSearchTypeResolutionResult', 'INLodgingReservation', 'INMassResolutionResult', 'INMediaAffinityTypeResolutionResult', 'INMediaDestination', 'INMediaDestinationResolutionResult', 'INMediaItem', 'INMediaItemResolutionResult', 'INMediaSearch', 'INMediaUserContext', 'INMessage', 'INMessageAttributeOptionsResolutionResult', 'INMessageAttributeResolutionResult', 'INNote', 'INNoteContent', 'INNoteContentResolutionResult', 'INNoteContentTypeResolutionResult', 'INNoteResolutionResult', 'INNotebookItemTypeResolutionResult', 'INObject', 'INObjectCollection', 'INObjectResolutionResult', 'INObjectSection', 'INOutgoingMessageTypeResolutionResult', 'INParameter', 'INPauseWorkoutIntent', 'INPauseWorkoutIntentResponse', 'INPayBillIntent', 'INPayBillIntentResponse', 'INPaymentAccount', 'INPaymentAccountResolutionResult', 'INPaymentAmount', 'INPaymentAmountResolutionResult', 'INPaymentMethod', 'INPaymentMethodResolutionResult', 'INPaymentRecord', 'INPaymentStatusResolutionResult', 'INPerson', 'INPersonHandle', 'INPersonResolutionResult', 'INPlacemarkResolutionResult', 'INPlayMediaIntent', 'INPlayMediaIntentResponse', 'INPlayMediaMediaItemResolutionResult', 'INPlayMediaPlaybackSpeedResolutionResult', 'INPlaybackQueueLocationResolutionResult', 'INPlaybackRepeatModeResolutionResult', 'INPreferences', 
'INPriceRange', 'INRadioTypeResolutionResult', 'INRecurrenceRule', 'INRelativeReferenceResolutionResult', 'INRelativeSettingResolutionResult', 'INRelevanceProvider', 'INRelevantShortcut', 'INRelevantShortcutStore', 'INRentalCar', 'INRentalCarReservation', 'INRequestPaymentCurrencyAmountResolutionResult', 'INRequestPaymentIntent', 'INRequestPaymentIntentResponse', 'INRequestPaymentPayerResolutionResult', 'INRequestRideIntent', 'INRequestRideIntentResponse', 'INReservation', 'INReservationAction', 'INRestaurant', 'INRestaurantGuest', 'INRestaurantGuestDisplayPreferences', 'INRestaurantGuestResolutionResult', 'INRestaurantOffer', 'INRestaurantReservation', 'INRestaurantReservationBooking', 'INRestaurantReservationUserBooking', 'INRestaurantResolutionResult', 'INResumeWorkoutIntent', 'INResumeWorkoutIntentResponse', 'INRideCompletionStatus', 'INRideDriver', 'INRideFareLineItem', 'INRideOption', 'INRidePartySizeOption', 'INRideStatus', 'INRideVehicle', 'INSaveProfileInCarIntent', 'INSaveProfileInCarIntentResponse', 'INSearchCallHistoryIntent', 'INSearchCallHistoryIntentResponse', 'INSearchForAccountsIntent', 'INSearchForAccountsIntentResponse', 'INSearchForBillsIntent', 'INSearchForBillsIntentResponse', 'INSearchForMediaIntent', 'INSearchForMediaIntentResponse', 'INSearchForMediaMediaItemResolutionResult', 'INSearchForMessagesIntent', 'INSearchForMessagesIntentResponse', 'INSearchForNotebookItemsIntent', 'INSearchForNotebookItemsIntentResponse', 'INSearchForPhotosIntent', 'INSearchForPhotosIntentResponse', 'INSeat', 'INSendMessageAttachment', 'INSendMessageIntent', 'INSendMessageIntentResponse', 'INSendMessageRecipientResolutionResult', 'INSendPaymentCurrencyAmountResolutionResult', 'INSendPaymentIntent', 'INSendPaymentIntentResponse', 'INSendPaymentPayeeResolutionResult', 'INSendRideFeedbackIntent', 'INSendRideFeedbackIntentResponse', 'INSetAudioSourceInCarIntent', 'INSetAudioSourceInCarIntentResponse', 'INSetCarLockStatusIntent', 'INSetCarLockStatusIntentResponse', 
'INSetClimateSettingsInCarIntent', 'INSetClimateSettingsInCarIntentResponse', 'INSetDefrosterSettingsInCarIntent', 'INSetDefrosterSettingsInCarIntentResponse', 'INSetMessageAttributeIntent', 'INSetMessageAttributeIntentResponse', 'INSetProfileInCarIntent', 'INSetProfileInCarIntentResponse', 'INSetRadioStationIntent', 'INSetRadioStationIntentResponse', 'INSetSeatSettingsInCarIntent', 'INSetSeatSettingsInCarIntentResponse', 'INSetTaskAttributeIntent', 'INSetTaskAttributeIntentResponse', 'INSetTaskAttributeTemporalEventTriggerResolutionResult', 'INShortcut', 'INSnoozeTasksIntent', 'INSnoozeTasksIntentResponse', 'INSnoozeTasksTaskResolutionResult', 'INSpatialEventTrigger', 'INSpatialEventTriggerResolutionResult', 'INSpeakableString', 'INSpeakableStringResolutionResult', 'INSpeedResolutionResult', 'INStartAudioCallIntent', 'INStartAudioCallIntentResponse', 'INStartCallCallCapabilityResolutionResult', 'INStartCallCallRecordToCallBackResolutionResult', 'INStartCallContactResolutionResult', 'INStartCallIntent', 'INStartCallIntentResponse', 'INStartPhotoPlaybackIntent', 'INStartPhotoPlaybackIntentResponse', 'INStartVideoCallIntent', 'INStartVideoCallIntentResponse', 'INStartWorkoutIntent', 'INStartWorkoutIntentResponse', 'INStringResolutionResult', 'INTask', 'INTaskList', 'INTaskListResolutionResult', 'INTaskPriorityResolutionResult', 'INTaskResolutionResult', 'INTaskStatusResolutionResult', 'INTemperatureResolutionResult', 'INTemporalEventTrigger', 'INTemporalEventTriggerResolutionResult', 'INTemporalEventTriggerTypeOptionsResolutionResult', 'INTermsAndConditions', 'INTextNoteContent', 'INTicketedEvent', 'INTicketedEventReservation', 'INTimeIntervalResolutionResult', 'INTrainReservation', 'INTrainTrip', 'INTransferMoneyIntent', 'INTransferMoneyIntentResponse', 'INUIAddVoiceShortcutButton', 'INUIAddVoiceShortcutViewController', 'INUIEditVoiceShortcutViewController', 'INURLResolutionResult', 'INUpcomingMediaManager', 'INUpdateMediaAffinityIntent', 
'INUpdateMediaAffinityIntentResponse', 'INUpdateMediaAffinityMediaItemResolutionResult', 'INUserContext', 'INVisualCodeTypeResolutionResult', 'INVocabulary', 'INVoiceShortcut', 'INVoiceShortcutCenter', 'INVolumeResolutionResult', 'INWorkoutGoalUnitTypeResolutionResult', 'INWorkoutLocationTypeResolutionResult', 'IOSurface', 'JSContext', 'JSManagedValue', 'JSValue', 'JSVirtualMachine', 'LAContext', 'LPLinkMetadata', 'LPLinkView', 'LPMetadataProvider', 'MCAdvertiserAssistant', 'MCBrowserViewController', 'MCNearbyServiceAdvertiser', 'MCNearbyServiceBrowser', 'MCPeerID', 'MCSession', 'MDLAnimatedMatrix4x4', 'MDLAnimatedQuaternion', 'MDLAnimatedQuaternionArray', 'MDLAnimatedScalar', 'MDLAnimatedScalarArray', 'MDLAnimatedValue', 'MDLAnimatedVector2', 'MDLAnimatedVector3', 'MDLAnimatedVector3Array', 'MDLAnimatedVector4', 'MDLAnimationBindComponent', 'MDLAreaLight', 'MDLAsset', 'MDLBundleAssetResolver', 'MDLCamera', 'MDLCheckerboardTexture', 'MDLColorSwatchTexture', 'MDLLight', 'MDLLightProbe', 'MDLMaterial', 'MDLMaterialProperty', 'MDLMaterialPropertyConnection', 'MDLMaterialPropertyGraph', 'MDLMaterialPropertyNode', 'MDLMatrix4x4Array', 'MDLMesh', 'MDLMeshBufferData', 'MDLMeshBufferDataAllocator', 'MDLMeshBufferMap', 'MDLMeshBufferZoneDefault', 'MDLNoiseTexture', 'MDLNormalMapTexture', 'MDLObject', 'MDLObjectContainer', 'MDLPackedJointAnimation', 'MDLPathAssetResolver', 'MDLPhotometricLight', 'MDLPhysicallyPlausibleLight', 'MDLPhysicallyPlausibleScatteringFunction', 'MDLRelativeAssetResolver', 'MDLScatteringFunction', 'MDLSkeleton', 'MDLSkyCubeTexture', 'MDLStereoscopicCamera', 'MDLSubmesh', 'MDLSubmeshTopology', 'MDLTexture', 'MDLTextureFilter', 'MDLTextureSampler', 'MDLTransform', 'MDLTransformMatrixOp', 'MDLTransformOrientOp', 'MDLTransformRotateOp', 'MDLTransformRotateXOp', 'MDLTransformRotateYOp', 'MDLTransformRotateZOp', 'MDLTransformScaleOp', 'MDLTransformStack', 'MDLTransformTranslateOp', 'MDLURLTexture', 'MDLVertexAttribute', 'MDLVertexAttributeData', 
'MDLVertexBufferLayout', 'MDLVertexDescriptor', 'MDLVoxelArray', 'MFMailComposeViewController', 'MFMessageComposeViewController', 'MIDICIDeviceInfo', 'MIDICIDiscoveredNode', 'MIDICIDiscoveryManager', 'MIDICIProfile', 'MIDICIProfileState', 'MIDICIResponder', 'MIDICISession', 'MIDINetworkConnection', 'MIDINetworkHost', 'MIDINetworkSession', 'MKAnnotationView', 'MKCircle', 'MKCircleRenderer', 'MKCircleView', 'MKClusterAnnotation', 'MKCompassButton', 'MKDirections', 'MKDirectionsRequest', 'MKDirectionsResponse', 'MKDistanceFormatter', 'MKETAResponse', 'MKGeoJSONDecoder', 'MKGeoJSONFeature', 'MKGeodesicPolyline', 'MKGradientPolylineRenderer', 'MKLocalPointsOfInterestRequest', 'MKLocalSearch', 'MKLocalSearchCompleter', 'MKLocalSearchCompletion', 'MKLocalSearchRequest', 'MKLocalSearchResponse', 'MKMapCamera', 'MKMapCameraBoundary', 'MKMapCameraZoomRange', 'MKMapItem', 'MKMapSnapshot', 'MKMapSnapshotOptions', 'MKMapSnapshotter', 'MKMapView', 'MKMarkerAnnotationView', 'MKMultiPoint', 'MKMultiPolygon', 'MKMultiPolygonRenderer', 'MKMultiPolyline', 'MKMultiPolylineRenderer', 'MKOverlayPathRenderer', 'MKOverlayPathView', 'MKOverlayRenderer', 'MKOverlayView', 'MKPinAnnotationView', 'MKPitchControl', 'MKPlacemark', 'MKPointAnnotation', 'MKPointOfInterestFilter', 'MKPolygon', 'MKPolygonRenderer', 'MKPolygonView', 'MKPolyline', 'MKPolylineRenderer', 'MKPolylineView', 'MKReverseGeocoder', 'MKRoute', 'MKRouteStep', 'MKScaleView', 'MKShape', 'MKTileOverlay', 'MKTileOverlayRenderer', 'MKUserLocation', 'MKUserLocationView', 'MKUserTrackingBarButtonItem', 'MKUserTrackingButton', 'MKZoomControl', 'MLArrayBatchProvider', 'MLCActivationDescriptor', 'MLCActivationLayer', 'MLCArithmeticLayer', 'MLCBatchNormalizationLayer', 'MLCConcatenationLayer', 'MLCConvolutionDescriptor', 'MLCConvolutionLayer', 'MLCDevice', 'MLCDropoutLayer', 'MLCEmbeddingDescriptor', 'MLCEmbeddingLayer', 'MLCFullyConnectedLayer', 'MLCGramMatrixLayer', 'MLCGraph', 'MLCGroupNormalizationLayer', 'MLCInferenceGraph', 
'MLCInstanceNormalizationLayer', 'MLCLSTMDescriptor', 'MLCLSTMLayer', 'MLCLayer', 'MLCLayerNormalizationLayer', 'MLCLossDescriptor', 'MLCLossLayer', 'MLCMatMulDescriptor', 'MLCMatMulLayer', 'MLCMultiheadAttentionDescriptor', 'MLCMultiheadAttentionLayer', 'MLCPaddingLayer', 'MLCPoolingDescriptor', 'MLCPoolingLayer', 'MLCReductionLayer', 'MLCReshapeLayer', 'MLCSliceLayer', 'MLCSoftmaxLayer', 'MLCSplitLayer', 'MLCTensor', 'MLCTensorData', 'MLCTensorDescriptor', 'MLCTensorOptimizerDeviceData', 'MLCTensorParameter', 'MLCTrainingGraph', 'MLCTransposeLayer', 'MLCUpsampleLayer', 'MLCYOLOLossDescriptor', 'MLCYOLOLossLayer', 'MLDictionaryConstraint', 'MLDictionaryFeatureProvider', 'MLFeatureDescription', 'MLFeatureValue', 'MLImageConstraint', 'MLImageSize', 'MLImageSizeConstraint', 'MLKey', 'MLMetricKey', 'MLModel', 'MLModelCollection', 'MLModelCollectionEntry', 'MLModelConfiguration', 'MLModelDescription', 'MLMultiArray', 'MLMultiArrayConstraint', 'MLMultiArrayShapeConstraint', 'MLNumericConstraint', 'MLParameterDescription', 'MLParameterKey', 'MLPredictionOptions', 'MLSequence', 'MLSequenceConstraint', 'MLTask', 'MLUpdateContext', 'MLUpdateProgressHandlers', 'MLUpdateTask', 'MPChangeLanguageOptionCommandEvent', 'MPChangePlaybackPositionCommand', 'MPChangePlaybackPositionCommandEvent', 'MPChangePlaybackRateCommand', 'MPChangePlaybackRateCommandEvent', 'MPChangeRepeatModeCommand', 'MPChangeRepeatModeCommandEvent', 'MPChangeShuffleModeCommand', 'MPChangeShuffleModeCommandEvent', 'MPContentItem', 'MPFeedbackCommand', 'MPFeedbackCommandEvent', 'MPMediaEntity', 'MPMediaItem', 'MPMediaItemArtwork', 'MPMediaItemCollection', 'MPMediaLibrary', 'MPMediaPickerController', 'MPMediaPlaylist', 'MPMediaPlaylistCreationMetadata', 'MPMediaPredicate', 'MPMediaPropertyPredicate', 'MPMediaQuery', 'MPMediaQuerySection', 'MPMovieAccessLog', 'MPMovieAccessLogEvent', 'MPMovieErrorLog', 'MPMovieErrorLogEvent', 'MPMoviePlayerController', 'MPMoviePlayerViewController', 
'MPMusicPlayerApplicationController', 'MPMusicPlayerController', 'MPMusicPlayerControllerMutableQueue', 'MPMusicPlayerControllerQueue', 'MPMusicPlayerMediaItemQueueDescriptor', 'MPMusicPlayerPlayParameters', 'MPMusicPlayerPlayParametersQueueDescriptor', 'MPMusicPlayerQueueDescriptor', 'MPMusicPlayerStoreQueueDescriptor', 'MPNowPlayingInfoCenter', 'MPNowPlayingInfoLanguageOption', 'MPNowPlayingInfoLanguageOptionGroup', 'MPNowPlayingSession', 'MPPlayableContentManager', 'MPPlayableContentManagerContext', 'MPRatingCommand', 'MPRatingCommandEvent', 'MPRemoteCommand', 'MPRemoteCommandCenter', 'MPRemoteCommandEvent', 'MPSGraph', 'MPSGraphConvolution2DOpDescriptor', 'MPSGraphDepthwiseConvolution2DOpDescriptor', 'MPSGraphDevice', 'MPSGraphExecutionDescriptor', 'MPSGraphOperation', 'MPSGraphPooling2DOpDescriptor', 'MPSGraphShapedType', 'MPSGraphTensor', 'MPSGraphTensorData', 'MPSGraphVariableOp', 'MPSeekCommandEvent', 'MPSkipIntervalCommand', 'MPSkipIntervalCommandEvent', 'MPTimedMetadata', 'MPVolumeView', 'MSConversation', 'MSMessage', 'MSMessageLayout', 'MSMessageLiveLayout', 'MSMessageTemplateLayout', 'MSMessagesAppViewController', 'MSServiceAccount', 'MSSession', 'MSSetupSession', 'MSSticker', 'MSStickerBrowserView', 'MSStickerBrowserViewController', 'MSStickerView', 'MTKMesh', 'MTKMeshBuffer', 'MTKMeshBufferAllocator', 'MTKSubmesh', 'MTKTextureLoader', 'MTKView', 'MTLAccelerationStructureBoundingBoxGeometryDescriptor', 'MTLAccelerationStructureDescriptor', 'MTLAccelerationStructureGeometryDescriptor', 'MTLAccelerationStructureTriangleGeometryDescriptor', 'MTLArgument', 'MTLArgumentDescriptor', 'MTLArrayType', 'MTLAttribute', 'MTLAttributeDescriptor', 'MTLAttributeDescriptorArray', 'MTLBinaryArchiveDescriptor', 'MTLBlitPassDescriptor', 'MTLBlitPassSampleBufferAttachmentDescriptor', 'MTLBlitPassSampleBufferAttachmentDescriptorArray', 'MTLBufferLayoutDescriptor', 'MTLBufferLayoutDescriptorArray', 'MTLCaptureDescriptor', 'MTLCaptureManager', 'MTLCommandBufferDescriptor', 
'MTLCompileOptions', 'MTLComputePassDescriptor', 'MTLComputePassSampleBufferAttachmentDescriptor', 'MTLComputePassSampleBufferAttachmentDescriptorArray', 'MTLComputePipelineDescriptor', 'MTLComputePipelineReflection', 'MTLCounterSampleBufferDescriptor', 'MTLDepthStencilDescriptor', 'MTLFunctionConstant', 'MTLFunctionConstantValues', 'MTLFunctionDescriptor', 'MTLHeapDescriptor', 'MTLIndirectCommandBufferDescriptor', 'MTLInstanceAccelerationStructureDescriptor', 'MTLIntersectionFunctionDescriptor', 'MTLIntersectionFunctionTableDescriptor', 'MTLLinkedFunctions', 'MTLPipelineBufferDescriptor', 'MTLPipelineBufferDescriptorArray', 'MTLPointerType', 'MTLPrimitiveAccelerationStructureDescriptor', 'MTLRasterizationRateLayerArray', 'MTLRasterizationRateLayerDescriptor', 'MTLRasterizationRateMapDescriptor', 'MTLRasterizationRateSampleArray', 'MTLRenderPassAttachmentDescriptor', 'MTLRenderPassColorAttachmentDescriptor', 'MTLRenderPassColorAttachmentDescriptorArray', 'MTLRenderPassDepthAttachmentDescriptor', 'MTLRenderPassDescriptor', 'MTLRenderPassSampleBufferAttachmentDescriptor', 'MTLRenderPassSampleBufferAttachmentDescriptorArray', 'MTLRenderPassStencilAttachmentDescriptor', 'MTLRenderPipelineColorAttachmentDescriptor', 'MTLRenderPipelineColorAttachmentDescriptorArray', 'MTLRenderPipelineDescriptor', 'MTLRenderPipelineReflection', 'MTLResourceStatePassDescriptor', 'MTLResourceStatePassSampleBufferAttachmentDescriptor', 'MTLResourceStatePassSampleBufferAttachmentDescriptorArray', 'MTLSamplerDescriptor', 'MTLSharedEventHandle', 'MTLSharedEventListener', 'MTLSharedTextureHandle', 'MTLStageInputOutputDescriptor', 'MTLStencilDescriptor', 'MTLStructMember', 'MTLStructType', 'MTLTextureDescriptor', 'MTLTextureReferenceType', 'MTLTileRenderPipelineColorAttachmentDescriptor', 'MTLTileRenderPipelineColorAttachmentDescriptorArray', 'MTLTileRenderPipelineDescriptor', 'MTLType', 'MTLVertexAttribute', 'MTLVertexAttributeDescriptor', 'MTLVertexAttributeDescriptorArray', 
'MTLVertexBufferLayoutDescriptor', 'MTLVertexBufferLayoutDescriptorArray', 'MTLVertexDescriptor', 'MTLVisibleFunctionTableDescriptor', 'MXAnimationMetric', 'MXAppExitMetric', 'MXAppLaunchMetric', 'MXAppResponsivenessMetric', 'MXAppRunTimeMetric', 'MXAverage', 'MXBackgroundExitData', 'MXCPUExceptionDiagnostic', 'MXCPUMetric', 'MXCallStackTree', 'MXCellularConditionMetric', 'MXCrashDiagnostic', 'MXDiagnostic', 'MXDiagnosticPayload', 'MXDiskIOMetric', 'MXDiskWriteExceptionDiagnostic', 'MXDisplayMetric', 'MXForegroundExitData', 'MXGPUMetric', 'MXHangDiagnostic', 'MXHistogram', 'MXHistogramBucket', 'MXLocationActivityMetric', 'MXMemoryMetric', 'MXMetaData', 'MXMetric', 'MXMetricManager', 'MXMetricPayload', 'MXNetworkTransferMetric', 'MXSignpostIntervalData', 'MXSignpostMetric', 'MXUnitAveragePixelLuminance', 'MXUnitSignalBars', 'MyClass', 'NCWidgetController', 'NEAppProxyFlow', 'NEAppProxyProvider', 'NEAppProxyProviderManager', 'NEAppProxyTCPFlow', 'NEAppProxyUDPFlow', 'NEAppPushManager', 'NEAppPushProvider', 'NEAppRule', 'NEDNSOverHTTPSSettings', 'NEDNSOverTLSSettings', 'NEDNSProxyManager', 'NEDNSProxyProvider', 'NEDNSProxyProviderProtocol', 'NEDNSSettings', 'NEDNSSettingsManager', 'NEEvaluateConnectionRule', 'NEFilterBrowserFlow', 'NEFilterControlProvider', 'NEFilterControlVerdict', 'NEFilterDataProvider', 'NEFilterDataVerdict', 'NEFilterFlow', 'NEFilterManager', 'NEFilterNewFlowVerdict', 'NEFilterPacketContext', 'NEFilterPacketProvider', 'NEFilterProvider', 'NEFilterProviderConfiguration', 'NEFilterRemediationVerdict', 'NEFilterReport', 'NEFilterRule', 'NEFilterSettings', 'NEFilterSocketFlow', 'NEFilterVerdict', 'NEFlowMetaData', 'NEHotspotConfiguration', 'NEHotspotConfigurationManager', 'NEHotspotEAPSettings', 'NEHotspotHS20Settings', 'NEHotspotHelper', 'NEHotspotHelperCommand', 'NEHotspotHelperResponse', 'NEHotspotNetwork', 'NEIPv4Route', 'NEIPv4Settings', 'NEIPv6Route', 'NEIPv6Settings', 'NENetworkRule', 'NEOnDemandRule', 'NEOnDemandRuleConnect', 
'NEOnDemandRuleDisconnect', 'NEOnDemandRuleEvaluateConnection', 'NEOnDemandRuleIgnore', 'NEPacket', 'NEPacketTunnelFlow', 'NEPacketTunnelNetworkSettings', 'NEPacketTunnelProvider', 'NEProvider', 'NEProxyServer', 'NEProxySettings', 'NETransparentProxyManager', 'NETransparentProxyNetworkSettings', 'NETransparentProxyProvider', 'NETunnelNetworkSettings', 'NETunnelProvider', 'NETunnelProviderManager', 'NETunnelProviderProtocol', 'NETunnelProviderSession', 'NEVPNConnection', 'NEVPNIKEv2SecurityAssociationParameters', 'NEVPNManager', 'NEVPNProtocol', 'NEVPNProtocolIKEv2', 'NEVPNProtocolIPSec', 'NFCISO15693CustomCommandConfiguration', 'NFCISO15693ReadMultipleBlocksConfiguration', 'NFCISO15693ReaderSession', 'NFCISO7816APDU', 'NFCNDEFMessage', 'NFCNDEFPayload', 'NFCNDEFReaderSession', 'NFCReaderSession', 'NFCTagCommandConfiguration', 'NFCTagReaderSession', 'NFCVASCommandConfiguration', 'NFCVASReaderSession', 'NFCVASResponse', 'NIConfiguration', 'NIDiscoveryToken', 'NINearbyObject', 'NINearbyPeerConfiguration', 'NISession', 'NKAssetDownload', 'NKIssue', 'NKLibrary', 'NLEmbedding', 'NLGazetteer', 'NLLanguageRecognizer', 'NLModel', 'NLModelConfiguration', 'NLTagger', 'NLTokenizer', 'NSArray', 'NSAssertionHandler', 'NSAsynchronousFetchRequest', 'NSAsynchronousFetchResult', 'NSAtomicStore', 'NSAtomicStoreCacheNode', 'NSAttributeDescription', 'NSAttributedString', 'NSAutoreleasePool', 'NSBatchDeleteRequest', 'NSBatchDeleteResult', 'NSBatchInsertRequest', 'NSBatchInsertResult', 'NSBatchUpdateRequest', 'NSBatchUpdateResult', 'NSBlockOperation', 'NSBundle', 'NSBundleResourceRequest', 'NSByteCountFormatter', 'NSCache', 'NSCachedURLResponse', 'NSCalendar', 'NSCharacterSet', 'NSCoder', 'NSCollectionLayoutAnchor', 'NSCollectionLayoutBoundarySupplementaryItem', 'NSCollectionLayoutDecorationItem', 'NSCollectionLayoutDimension', 'NSCollectionLayoutEdgeSpacing', 'NSCollectionLayoutGroup', 'NSCollectionLayoutGroupCustomItem', 'NSCollectionLayoutItem', 'NSCollectionLayoutSection', 
'NSCollectionLayoutSize', 'NSCollectionLayoutSpacing', 'NSCollectionLayoutSupplementaryItem', 'NSComparisonPredicate', 'NSCompoundPredicate', 'NSCondition', 'NSConditionLock', 'NSConstantString', 'NSConstraintConflict', 'NSCoreDataCoreSpotlightDelegate', 'NSCountedSet', 'NSData', 'NSDataAsset', 'NSDataDetector', 'NSDate', 'NSDateComponents', 'NSDateComponentsFormatter', 'NSDateFormatter', 'NSDateInterval', 'NSDateIntervalFormatter', 'NSDecimalNumber', 'NSDecimalNumberHandler', 'NSDerivedAttributeDescription', 'NSDictionary', 'NSDiffableDataSourceSectionSnapshot', 'NSDiffableDataSourceSectionTransaction', 'NSDiffableDataSourceSnapshot', 'NSDiffableDataSourceTransaction', 'NSDimension', 'NSDirectoryEnumerator', 'NSEnergyFormatter', 'NSEntityDescription', 'NSEntityMapping', 'NSEntityMigrationPolicy', 'NSEnumerator', 'NSError', 'NSEvent', 'NSException', 'NSExpression', 'NSExpressionDescription', 'NSExtensionContext', 'NSExtensionItem', 'NSFetchIndexDescription', 'NSFetchIndexElementDescription', 'NSFetchRequest', 'NSFetchRequestExpression', 'NSFetchedPropertyDescription', 'NSFetchedResultsController', 'NSFileAccessIntent', 'NSFileCoordinator', 'NSFileHandle', 'NSFileManager', 'NSFileProviderDomain', 'NSFileProviderExtension', 'NSFileProviderManager', 'NSFileProviderService', 'NSFileSecurity', 'NSFileVersion', 'NSFileWrapper', 'NSFormatter', 'NSHTTPCookie', 'NSHTTPCookieStorage', 'NSHTTPURLResponse', 'NSHashTable', 'NSISO8601DateFormatter', 'NSIncrementalStore', 'NSIncrementalStoreNode', 'NSIndexPath', 'NSIndexSet', 'NSInputStream', 'NSInvocation', 'NSInvocationOperation', 'NSItemProvider', 'NSJSONSerialization', 'NSKeyedArchiver', 'NSKeyedUnarchiver', 'NSLayoutAnchor', 'NSLayoutConstraint', 'NSLayoutDimension', 'NSLayoutManager', 'NSLayoutXAxisAnchor', 'NSLayoutYAxisAnchor', 'NSLengthFormatter', 'NSLinguisticTagger', 'NSListFormatter', 'NSLocale', 'NSLock', 'NSMachPort', 'NSManagedObject', 'NSManagedObjectContext', 'NSManagedObjectID', 'NSManagedObjectModel', 
'NSMapTable', 'NSMappingModel', 'NSMassFormatter', 'NSMeasurement', 'NSMeasurementFormatter', 'NSMenuToolbarItem', 'NSMergeConflict', 'NSMergePolicy', 'NSMessagePort', 'NSMetadataItem', 'NSMetadataQuery', 'NSMetadataQueryAttributeValueTuple', 'NSMetadataQueryResultGroup', 'NSMethodSignature', 'NSMigrationManager', 'NSMutableArray', 'NSMutableAttributedString', 'NSMutableCharacterSet', 'NSMutableData', 'NSMutableDictionary', 'NSMutableIndexSet', 'NSMutableOrderedSet', 'NSMutableParagraphStyle', 'NSMutableSet', 'NSMutableString', 'NSMutableURLRequest', 'NSNetService', 'NSNetServiceBrowser', 'NSNotification', 'NSNotificationCenter', 'NSNotificationQueue', 'NSNull', 'NSNumber', 'NSNumberFormatter', 'NSObject', 'NSOperation', 'NSOperationQueue', 'NSOrderedCollectionChange', 'NSOrderedCollectionDifference', 'NSOrderedSet', 'NSOrthography', 'NSOutputStream', 'NSParagraphStyle', 'NSPersistentCloudKitContainer', 'NSPersistentCloudKitContainerEvent', 'NSPersistentCloudKitContainerEventRequest', 'NSPersistentCloudKitContainerEventResult', 'NSPersistentCloudKitContainerOptions', 'NSPersistentContainer', 'NSPersistentHistoryChange', 'NSPersistentHistoryChangeRequest', 'NSPersistentHistoryResult', 'NSPersistentHistoryToken', 'NSPersistentHistoryTransaction', 'NSPersistentStore', 'NSPersistentStoreAsynchronousResult', 'NSPersistentStoreCoordinator', 'NSPersistentStoreDescription', 'NSPersistentStoreRequest', 'NSPersistentStoreResult', 'NSPersonNameComponents', 'NSPersonNameComponentsFormatter', 'NSPipe', 'NSPointerArray', 'NSPointerFunctions', 'NSPort', 'NSPredicate', 'NSProcessInfo', 'NSProgress', 'NSPropertyDescription', 'NSPropertyListSerialization', 'NSPropertyMapping', 'NSProxy', 'NSPurgeableData', 'NSQueryGenerationToken', 'NSRecursiveLock', 'NSRegularExpression', 'NSRelationshipDescription', 'NSRelativeDateTimeFormatter', 'NSRunLoop', 'NSSaveChangesRequest', 'NSScanner', 'NSSecureUnarchiveFromDataTransformer', 'NSSet', 'NSShadow', 'NSSharingServicePickerToolbarItem', 
'NSSharingServicePickerTouchBarItem', 'NSSimpleCString', 'NSSocketPort', 'NSSortDescriptor', 'NSStream', 'NSString', 'NSStringDrawingContext', 'NSTextAttachment', 'NSTextCheckingResult', 'NSTextContainer', 'NSTextStorage', 'NSTextTab', 'NSThread', 'NSTimeZone', 'NSTimer', 'NSToolbarItem', 'NSURL', 'NSURLAuthenticationChallenge', 'NSURLCache', 'NSURLComponents', 'NSURLConnection', 'NSURLCredential', 'NSURLCredentialStorage', 'NSURLProtectionSpace', 'NSURLProtocol', 'NSURLQueryItem', 'NSURLRequest', 'NSURLResponse', 'NSURLSession', 'NSURLSessionConfiguration', 'NSURLSessionDataTask', 'NSURLSessionDownloadTask', 'NSURLSessionStreamTask', 'NSURLSessionTask', 'NSURLSessionTaskMetrics', 'NSURLSessionTaskTransactionMetrics', 'NSURLSessionUploadTask', 'NSURLSessionWebSocketMessage', 'NSURLSessionWebSocketTask', 'NSUUID', 'NSUbiquitousKeyValueStore', 'NSUndoManager', 'NSUnit', 'NSUnitAcceleration', 'NSUnitAngle', 'NSUnitArea', 'NSUnitConcentrationMass', 'NSUnitConverter', 'NSUnitConverterLinear', 'NSUnitDispersion', 'NSUnitDuration', 'NSUnitElectricCharge', 'NSUnitElectricCurrent', 'NSUnitElectricPotentialDifference', 'NSUnitElectricResistance', 'NSUnitEnergy', 'NSUnitFrequency', 'NSUnitFuelEfficiency', 'NSUnitIlluminance', 'NSUnitInformationStorage', 'NSUnitLength', 'NSUnitMass', 'NSUnitPower', 'NSUnitPressure', 'NSUnitSpeed', 'NSUnitTemperature', 'NSUnitVolume', 'NSUserActivity', 'NSUserDefaults', 'NSValue', 'NSValueTransformer', 'NSXMLParser', 'NSXPCCoder', 'NSXPCConnection', 'NSXPCInterface', 'NSXPCListener', 'NSXPCListenerEndpoint', 'NWBonjourServiceEndpoint', 'NWEndpoint', 'NWHostEndpoint', 'NWPath', 'NWTCPConnection', 'NWTLSParameters', 'NWUDPSession', 'OSLogEntry', 'OSLogEntryActivity', 'OSLogEntryBoundary', 'OSLogEntryLog', 'OSLogEntrySignpost', 'OSLogEnumerator', 'OSLogMessageComponent', 'OSLogPosition', 'OSLogStore', 'PDFAction', 'PDFActionGoTo', 'PDFActionNamed', 'PDFActionRemoteGoTo', 'PDFActionResetForm', 'PDFActionURL', 'PDFAnnotation', 
'PDFAppearanceCharacteristics', 'PDFBorder', 'PDFDestination', 'PDFDocument', 'PDFOutline', 'PDFPage', 'PDFSelection', 'PDFThumbnailView', 'PDFView', 'PHAdjustmentData', 'PHAsset', 'PHAssetChangeRequest', 'PHAssetCollection', 'PHAssetCollectionChangeRequest', 'PHAssetCreationRequest', 'PHAssetResource', 'PHAssetResourceCreationOptions', 'PHAssetResourceManager', 'PHAssetResourceRequestOptions', 'PHCachingImageManager', 'PHChange', 'PHChangeRequest', 'PHCloudIdentifier', 'PHCollection', 'PHCollectionList', 'PHCollectionListChangeRequest', 'PHContentEditingInput', 'PHContentEditingInputRequestOptions', 'PHContentEditingOutput', 'PHEditingExtensionContext', 'PHFetchOptions', 'PHFetchResult', 'PHFetchResultChangeDetails', 'PHImageManager', 'PHImageRequestOptions', 'PHLivePhoto', 'PHLivePhotoEditingContext', 'PHLivePhotoRequestOptions', 'PHLivePhotoView', 'PHObject', 'PHObjectChangeDetails', 'PHObjectPlaceholder', 'PHPhotoLibrary', 'PHPickerConfiguration', 'PHPickerFilter', 'PHPickerResult', 'PHPickerViewController', 'PHProject', 'PHProjectChangeRequest', 'PHVideoRequestOptions', 'PKAddCarKeyPassConfiguration', 'PKAddPassButton', 'PKAddPassesViewController', 'PKAddPaymentPassRequest', 'PKAddPaymentPassRequestConfiguration', 'PKAddPaymentPassViewController', 'PKAddSecureElementPassConfiguration', 'PKAddSecureElementPassViewController', 'PKAddShareablePassConfiguration', 'PKBarcodeEventConfigurationRequest', 'PKBarcodeEventMetadataRequest', 'PKBarcodeEventMetadataResponse', 'PKBarcodeEventSignatureRequest', 'PKBarcodeEventSignatureResponse', 'PKCanvasView', 'PKContact', 'PKDisbursementAuthorizationController', 'PKDisbursementRequest', 'PKDisbursementVoucher', 'PKDrawing', 'PKEraserTool', 'PKFloatRange', 'PKInk', 'PKInkingTool', 'PKIssuerProvisioningExtensionHandler', 'PKIssuerProvisioningExtensionPassEntry', 'PKIssuerProvisioningExtensionPaymentPassEntry', 'PKIssuerProvisioningExtensionStatus', 'PKLabeledValue', 'PKLassoTool', 'PKObject', 'PKPass', 'PKPassLibrary', 
'PKPayment', 'PKPaymentAuthorizationController', 'PKPaymentAuthorizationResult', 'PKPaymentAuthorizationViewController', 'PKPaymentButton', 'PKPaymentInformationEventExtension', 'PKPaymentMerchantSession', 'PKPaymentMethod', 'PKPaymentPass', 'PKPaymentRequest', 'PKPaymentRequestMerchantSessionUpdate', 'PKPaymentRequestPaymentMethodUpdate', 'PKPaymentRequestShippingContactUpdate', 'PKPaymentRequestShippingMethodUpdate', 'PKPaymentRequestUpdate', 'PKPaymentSummaryItem', 'PKPaymentToken', 'PKPushCredentials', 'PKPushPayload', 'PKPushRegistry', 'PKSecureElementPass', 'PKShareablePassMetadata', 'PKShippingMethod', 'PKStroke', 'PKStrokePath', 'PKStrokePoint', 'PKSuicaPassProperties', 'PKTool', 'PKToolPicker', 'PKTransitPassProperties', 'QLFileThumbnailRequest', 'QLPreviewController', 'QLThumbnailGenerationRequest', 'QLThumbnailGenerator', 'QLThumbnailProvider', 'QLThumbnailReply', 'QLThumbnailRepresentation', 'RPBroadcastActivityController', 'RPBroadcastActivityViewController', 'RPBroadcastConfiguration', 'RPBroadcastController', 'RPBroadcastHandler', 'RPBroadcastMP4ClipHandler', 'RPBroadcastSampleHandler', 'RPPreviewViewController', 'RPScreenRecorder', 'RPSystemBroadcastPickerView', 'SCNAccelerationConstraint', 'SCNAction', 'SCNAnimation', 'SCNAnimationEvent', 'SCNAnimationPlayer', 'SCNAudioPlayer', 'SCNAudioSource', 'SCNAvoidOccluderConstraint', 'SCNBillboardConstraint', 'SCNBox', 'SCNCamera', 'SCNCameraController', 'SCNCapsule', 'SCNCone', 'SCNConstraint', 'SCNCylinder', 'SCNDistanceConstraint', 'SCNFloor', 'SCNGeometry', 'SCNGeometryElement', 'SCNGeometrySource', 'SCNGeometryTessellator', 'SCNHitTestResult', 'SCNIKConstraint', 'SCNLevelOfDetail', 'SCNLight', 'SCNLookAtConstraint', 'SCNMaterial', 'SCNMaterialProperty', 'SCNMorpher', 'SCNNode', 'SCNParticlePropertyController', 'SCNParticleSystem', 'SCNPhysicsBallSocketJoint', 'SCNPhysicsBehavior', 'SCNPhysicsBody', 'SCNPhysicsConeTwistJoint', 'SCNPhysicsContact', 'SCNPhysicsField', 'SCNPhysicsHingeJoint', 
'SCNPhysicsShape', 'SCNPhysicsSliderJoint', 'SCNPhysicsVehicle', 'SCNPhysicsVehicleWheel', 'SCNPhysicsWorld', 'SCNPlane', 'SCNProgram', 'SCNPyramid', 'SCNReferenceNode', 'SCNRenderer', 'SCNReplicatorConstraint', 'SCNScene', 'SCNSceneSource', 'SCNShape', 'SCNSkinner', 'SCNSliderConstraint', 'SCNSphere', 'SCNTechnique', 'SCNText', 'SCNTimingFunction', 'SCNTorus', 'SCNTransaction', 'SCNTransformConstraint', 'SCNTube', 'SCNView', 'SFAcousticFeature', 'SFAuthenticationSession', 'SFContentBlockerManager', 'SFContentBlockerState', 'SFSafariViewController', 'SFSafariViewControllerConfiguration', 'SFSpeechAudioBufferRecognitionRequest', 'SFSpeechRecognitionRequest', 'SFSpeechRecognitionResult', 'SFSpeechRecognitionTask', 'SFSpeechRecognizer', 'SFSpeechURLRecognitionRequest', 'SFTranscription', 'SFTranscriptionSegment', 'SFVoiceAnalytics', 'SK3DNode', 'SKAction', 'SKAdNetwork', 'SKArcadeService', 'SKAttribute', 'SKAttributeValue', 'SKAudioNode', 'SKCameraNode', 'SKCloudServiceController', 'SKCloudServiceSetupViewController', 'SKConstraint', 'SKCropNode', 'SKDownload', 'SKEffectNode', 'SKEmitterNode', 'SKFieldNode', 'SKKeyframeSequence', 'SKLabelNode', 'SKLightNode', 'SKMutablePayment', 'SKMutableTexture', 'SKNode', 'SKOverlay', 'SKOverlayAppClipConfiguration', 'SKOverlayAppConfiguration', 'SKOverlayConfiguration', 'SKOverlayTransitionContext', 'SKPayment', 'SKPaymentDiscount', 'SKPaymentQueue', 'SKPaymentTransaction', 'SKPhysicsBody', 'SKPhysicsContact', 'SKPhysicsJoint', 'SKPhysicsJointFixed', 'SKPhysicsJointLimit', 'SKPhysicsJointPin', 'SKPhysicsJointSliding', 'SKPhysicsJointSpring', 'SKPhysicsWorld', 'SKProduct', 'SKProductDiscount', 'SKProductStorePromotionController', 'SKProductSubscriptionPeriod', 'SKProductsRequest', 'SKProductsResponse', 'SKRange', 'SKReachConstraints', 'SKReceiptRefreshRequest', 'SKReferenceNode', 'SKRegion', 'SKRenderer', 'SKRequest', 'SKScene', 'SKShader', 'SKShapeNode', 'SKSpriteNode', 'SKStoreProductViewController', 'SKStoreReviewController', 
'SKStorefront', 'SKTexture', 'SKTextureAtlas', 'SKTileDefinition', 'SKTileGroup', 'SKTileGroupRule', 'SKTileMapNode', 'SKTileSet', 'SKTransformNode', 'SKTransition', 'SKUniform', 'SKVideoNode', 'SKView', 'SKWarpGeometry', 'SKWarpGeometryGrid', 'SLComposeServiceViewController', 'SLComposeSheetConfigurationItem', 'SLComposeViewController', 'SLRequest', 'SNAudioFileAnalyzer', 'SNAudioStreamAnalyzer', 'SNClassification', 'SNClassificationResult', 'SNClassifySoundRequest', 'SRAmbientLightSample', 'SRApplicationUsage', 'SRDeletionRecord', 'SRDevice', 'SRDeviceUsageReport', 'SRFetchRequest', 'SRFetchResult', 'SRKeyboardMetrics', 'SRKeyboardProbabilityMetric', 'SRMessagesUsageReport', 'SRNotificationUsage', 'SRPhoneUsageReport', 'SRSensorReader', 'SRVisit', 'SRWebUsage', 'SRWristDetection', 'SSReadingList', 'STScreenTimeConfiguration', 'STScreenTimeConfigurationObserver', 'STWebHistory', 'STWebpageController', 'TKBERTLVRecord', 'TKCompactTLVRecord', 'TKSimpleTLVRecord', 'TKSmartCard', 'TKSmartCardATR', 'TKSmartCardATRInterfaceGroup', 'TKSmartCardPINFormat', 'TKSmartCardSlot', 'TKSmartCardSlotManager', 'TKSmartCardToken', 'TKSmartCardTokenDriver', 'TKSmartCardTokenSession', 'TKSmartCardUserInteraction', 'TKSmartCardUserInteractionForPINOperation', 'TKSmartCardUserInteractionForSecurePINChange', 'TKSmartCardUserInteractionForSecurePINVerification', 'TKTLVRecord', 'TKToken', 'TKTokenAuthOperation', 'TKTokenConfiguration', 'TKTokenDriver', 'TKTokenDriverConfiguration', 'TKTokenKeyAlgorithm', 'TKTokenKeyExchangeParameters', 'TKTokenKeychainCertificate', 'TKTokenKeychainContents', 'TKTokenKeychainItem', 'TKTokenKeychainKey', 'TKTokenPasswordAuthOperation', 'TKTokenSession', 'TKTokenSmartCardPINAuthOperation', 'TKTokenWatcher', 'TWRequest', 'TWTweetComposeViewController', 'UIAcceleration', 'UIAccelerometer', 'UIAccessibilityCustomAction', 'UIAccessibilityCustomRotor', 'UIAccessibilityCustomRotorItemResult', 'UIAccessibilityCustomRotorSearchPredicate', 'UIAccessibilityElement', 
'UIAccessibilityLocationDescriptor', 'UIAction', 'UIActionSheet', 'UIActivity', 'UIActivityIndicatorView', 'UIActivityItemProvider', 'UIActivityItemsConfiguration', 'UIActivityViewController', 'UIAlertAction', 'UIAlertController', 'UIAlertView', 'UIApplication', 'UIApplicationShortcutIcon', 'UIApplicationShortcutItem', 'UIAttachmentBehavior', 'UIBackgroundConfiguration', 'UIBarAppearance', 'UIBarButtonItem', 'UIBarButtonItemAppearance', 'UIBarButtonItemGroup', 'UIBarButtonItemStateAppearance', 'UIBarItem', 'UIBezierPath', 'UIBlurEffect', 'UIButton', 'UICellAccessory', 'UICellAccessoryCheckmark', 'UICellAccessoryCustomView', 'UICellAccessoryDelete', 'UICellAccessoryDisclosureIndicator', 'UICellAccessoryInsert', 'UICellAccessoryLabel', 'UICellAccessoryMultiselect', 'UICellAccessoryOutlineDisclosure', 'UICellAccessoryReorder', 'UICellConfigurationState', 'UICloudSharingController', 'UICollectionLayoutListConfiguration', 'UICollectionReusableView', 'UICollectionView', 'UICollectionViewCell', 'UICollectionViewCellRegistration', 'UICollectionViewCompositionalLayout', 'UICollectionViewCompositionalLayoutConfiguration', 'UICollectionViewController', 'UICollectionViewDiffableDataSource', 'UICollectionViewDiffableDataSourceReorderingHandlers', 'UICollectionViewDiffableDataSourceSectionSnapshotHandlers', 'UICollectionViewDropPlaceholder', 'UICollectionViewDropProposal', 'UICollectionViewFlowLayout', 'UICollectionViewFlowLayoutInvalidationContext', 'UICollectionViewFocusUpdateContext', 'UICollectionViewLayout', 'UICollectionViewLayoutAttributes', 'UICollectionViewLayoutInvalidationContext', 'UICollectionViewListCell', 'UICollectionViewPlaceholder', 'UICollectionViewSupplementaryRegistration', 'UICollectionViewTransitionLayout', 'UICollectionViewUpdateItem', 'UICollisionBehavior', 'UIColor', 'UIColorPickerViewController', 'UIColorWell', 'UICommand', 'UICommandAlternate', 'UIContextMenuConfiguration', 'UIContextMenuInteraction', 'UIContextualAction', 'UIControl', 
'UICubicTimingParameters', 'UIDatePicker', 'UIDeferredMenuElement', 'UIDevice', 'UIDictationPhrase', 'UIDocument', 'UIDocumentBrowserAction', 'UIDocumentBrowserTransitionController', 'UIDocumentBrowserViewController', 'UIDocumentInteractionController', 'UIDocumentMenuViewController', 'UIDocumentPickerExtensionViewController', 'UIDocumentPickerViewController', 'UIDragInteraction', 'UIDragItem', 'UIDragPreview', 'UIDragPreviewParameters', 'UIDragPreviewTarget', 'UIDropInteraction', 'UIDropProposal', 'UIDynamicAnimator', 'UIDynamicBehavior', 'UIDynamicItemBehavior', 'UIDynamicItemGroup', 'UIEvent', 'UIFeedbackGenerator', 'UIFieldBehavior', 'UIFocusAnimationCoordinator', 'UIFocusDebugger', 'UIFocusGuide', 'UIFocusMovementHint', 'UIFocusSystem', 'UIFocusUpdateContext', 'UIFont', 'UIFontDescriptor', 'UIFontMetrics', 'UIFontPickerViewController', 'UIFontPickerViewControllerConfiguration', 'UIGestureRecognizer', 'UIGraphicsImageRenderer', 'UIGraphicsImageRendererContext', 'UIGraphicsImageRendererFormat', 'UIGraphicsPDFRenderer', 'UIGraphicsPDFRendererContext', 'UIGraphicsPDFRendererFormat', 'UIGraphicsRenderer', 'UIGraphicsRendererContext', 'UIGraphicsRendererFormat', 'UIGravityBehavior', 'UIHoverGestureRecognizer', 'UIImage', 'UIImageAsset', 'UIImageConfiguration', 'UIImagePickerController', 'UIImageSymbolConfiguration', 'UIImageView', 'UIImpactFeedbackGenerator', 'UIIndirectScribbleInteraction', 'UIInputView', 'UIInputViewController', 'UIInterpolatingMotionEffect', 'UIKey', 'UIKeyCommand', 'UILabel', 'UILargeContentViewerInteraction', 'UILayoutGuide', 'UILexicon', 'UILexiconEntry', 'UIListContentConfiguration', 'UIListContentImageProperties', 'UIListContentTextProperties', 'UIListContentView', 'UILocalNotification', 'UILocalizedIndexedCollation', 'UILongPressGestureRecognizer', 'UIManagedDocument', 'UIMarkupTextPrintFormatter', 'UIMenu', 'UIMenuController', 'UIMenuElement', 'UIMenuItem', 'UIMenuSystem', 'UIMotionEffect', 'UIMotionEffectGroup', 
'UIMutableApplicationShortcutItem', 'UIMutableUserNotificationAction', 'UIMutableUserNotificationCategory', 'UINavigationBar', 'UINavigationBarAppearance', 'UINavigationController', 'UINavigationItem', 'UINib', 'UINotificationFeedbackGenerator', 'UIOpenURLContext', 'UIPageControl', 'UIPageViewController', 'UIPanGestureRecognizer', 'UIPasteConfiguration', 'UIPasteboard', 'UIPencilInteraction', 'UIPercentDrivenInteractiveTransition', 'UIPickerView', 'UIPinchGestureRecognizer', 'UIPointerEffect', 'UIPointerHighlightEffect', 'UIPointerHoverEffect', 'UIPointerInteraction', 'UIPointerLiftEffect', 'UIPointerLockState', 'UIPointerRegion', 'UIPointerRegionRequest', 'UIPointerShape', 'UIPointerStyle', 'UIPopoverBackgroundView', 'UIPopoverController', 'UIPopoverPresentationController', 'UIPresentationController', 'UIPress', 'UIPressesEvent', 'UIPreviewAction', 'UIPreviewActionGroup', 'UIPreviewInteraction', 'UIPreviewParameters', 'UIPreviewTarget', 'UIPrintFormatter', 'UIPrintInfo', 'UIPrintInteractionController', 'UIPrintPageRenderer', 'UIPrintPaper', 'UIPrinter', 'UIPrinterPickerController', 'UIProgressView', 'UIPushBehavior', 'UIReferenceLibraryViewController', 'UIRefreshControl', 'UIRegion', 'UIResponder', 'UIRotationGestureRecognizer', 'UIScene', 'UISceneActivationConditions', 'UISceneActivationRequestOptions', 'UISceneConfiguration', 'UISceneConnectionOptions', 'UISceneDestructionRequestOptions', 'UISceneOpenExternalURLOptions', 'UISceneOpenURLOptions', 'UISceneSession', 'UISceneSizeRestrictions', 'UIScreen', 'UIScreenEdgePanGestureRecognizer', 'UIScreenMode', 'UIScreenshotService', 'UIScribbleInteraction', 'UIScrollView', 'UISearchBar', 'UISearchContainerViewController', 'UISearchController', 'UISearchDisplayController', 'UISearchSuggestionItem', 'UISearchTextField', 'UISearchToken', 'UISegmentedControl', 'UISelectionFeedbackGenerator', 'UISimpleTextPrintFormatter', 'UISlider', 'UISnapBehavior', 'UISplitViewController', 'UISpringLoadedInteraction', 
'UISpringTimingParameters', 'UIStackView', 'UIStatusBarManager', 'UIStepper', 'UIStoryboard', 'UIStoryboardPopoverSegue', 'UIStoryboardSegue', 'UIStoryboardUnwindSegueSource', 'UISwipeActionsConfiguration', 'UISwipeGestureRecognizer', 'UISwitch', 'UITabBar', 'UITabBarAppearance', 'UITabBarController', 'UITabBarItem', 'UITabBarItemAppearance', 'UITabBarItemStateAppearance', 'UITableView', 'UITableViewCell', 'UITableViewController', 'UITableViewDiffableDataSource', 'UITableViewDropPlaceholder', 'UITableViewDropProposal', 'UITableViewFocusUpdateContext', 'UITableViewHeaderFooterView', 'UITableViewPlaceholder', 'UITableViewRowAction', 'UITapGestureRecognizer', 'UITargetedDragPreview', 'UITargetedPreview', 'UITextChecker', 'UITextDragPreviewRenderer', 'UITextDropProposal', 'UITextField', 'UITextFormattingCoordinator', 'UITextInputAssistantItem', 'UITextInputMode', 'UITextInputPasswordRules', 'UITextInputStringTokenizer', 'UITextInteraction', 'UITextPlaceholder', 'UITextPosition', 'UITextRange', 'UITextSelectionRect', 'UITextView', 'UITitlebar', 'UIToolbar', 'UIToolbarAppearance', 'UITouch', 'UITraitCollection', 'UIUserNotificationAction', 'UIUserNotificationCategory', 'UIUserNotificationSettings', 'UIVibrancyEffect', 'UIVideoEditorController', 'UIView', 'UIViewConfigurationState', 'UIViewController', 'UIViewPrintFormatter', 'UIViewPropertyAnimator', 'UIVisualEffect', 'UIVisualEffectView', 'UIWebView', 'UIWindow', 'UIWindowScene', 'UIWindowSceneDestructionRequestOptions', 'UNCalendarNotificationTrigger', 'UNLocationNotificationTrigger', 'UNMutableNotificationContent', 'UNNotification', 'UNNotificationAction', 'UNNotificationAttachment', 'UNNotificationCategory', 'UNNotificationContent', 'UNNotificationRequest', 'UNNotificationResponse', 'UNNotificationServiceExtension', 'UNNotificationSettings', 'UNNotificationSound', 'UNNotificationTrigger', 'UNPushNotificationTrigger', 'UNTextInputNotificationAction', 'UNTextInputNotificationResponse', 
'UNTimeIntervalNotificationTrigger', 'UNUserNotificationCenter', 'UTType', 'VNBarcodeObservation', 'VNCircle', 'VNClassificationObservation', 'VNClassifyImageRequest', 'VNContour', 'VNContoursObservation', 'VNCoreMLFeatureValueObservation', 'VNCoreMLModel', 'VNCoreMLRequest', 'VNDetectBarcodesRequest', 'VNDetectContoursRequest', 'VNDetectFaceCaptureQualityRequest', 'VNDetectFaceLandmarksRequest', 'VNDetectFaceRectanglesRequest', 'VNDetectHorizonRequest', 'VNDetectHumanBodyPoseRequest', 'VNDetectHumanHandPoseRequest', 'VNDetectHumanRectanglesRequest', 'VNDetectRectanglesRequest', 'VNDetectTextRectanglesRequest', 'VNDetectTrajectoriesRequest', 'VNDetectedObjectObservation', 'VNDetectedPoint', 'VNDocumentCameraScan', 'VNDocumentCameraViewController', 'VNFaceLandmarkRegion', 'VNFaceLandmarkRegion2D', 'VNFaceLandmarks', 'VNFaceLandmarks2D', 'VNFaceObservation', 'VNFeaturePrintObservation', 'VNGenerateAttentionBasedSaliencyImageRequest', 'VNGenerateImageFeaturePrintRequest', 'VNGenerateObjectnessBasedSaliencyImageRequest', 'VNGenerateOpticalFlowRequest', 'VNGeometryUtils', 'VNHomographicImageRegistrationRequest', 'VNHorizonObservation', 'VNHumanBodyPoseObservation', 'VNHumanHandPoseObservation', 'VNImageAlignmentObservation', 'VNImageBasedRequest', 'VNImageHomographicAlignmentObservation', 'VNImageRegistrationRequest', 'VNImageRequestHandler', 'VNImageTranslationAlignmentObservation', 'VNObservation', 'VNPixelBufferObservation', 'VNPoint', 'VNRecognizeAnimalsRequest', 'VNRecognizeTextRequest', 'VNRecognizedObjectObservation', 'VNRecognizedPoint', 'VNRecognizedPointsObservation', 'VNRecognizedText', 'VNRecognizedTextObservation', 'VNRectangleObservation', 'VNRequest', 'VNSaliencyImageObservation', 'VNSequenceRequestHandler', 'VNStatefulRequest', 'VNTargetedImageRequest', 'VNTextObservation', 'VNTrackObjectRequest', 'VNTrackRectangleRequest', 'VNTrackingRequest', 'VNTrajectoryObservation', 'VNTranslationalImageRegistrationRequest', 'VNVector', 'VNVideoProcessor', 
'VNVideoProcessorCadence', 'VNVideoProcessorFrameRateCadence', 'VNVideoProcessorRequestProcessingOptions', 'VNVideoProcessorTimeIntervalCadence', 'VSAccountApplicationProvider', 'VSAccountManager', 'VSAccountManagerResult', 'VSAccountMetadata', 'VSAccountMetadataRequest', 'VSAccountProviderResponse', 'VSSubscription', 'VSSubscriptionRegistrationCenter', 'WCSession', 'WCSessionFile', 'WCSessionFileTransfer', 'WCSessionUserInfoTransfer', 'WKBackForwardList', 'WKBackForwardListItem', 'WKContentRuleList', 'WKContentRuleListStore', 'WKContentWorld', 'WKContextMenuElementInfo', 'WKFindConfiguration', 'WKFindResult', 'WKFrameInfo', 'WKHTTPCookieStore', 'WKNavigation', 'WKNavigationAction', 'WKNavigationResponse', 'WKOpenPanelParameters', 'WKPDFConfiguration', 'WKPreferences', 'WKPreviewElementInfo', 'WKProcessPool', 'WKScriptMessage', 'WKSecurityOrigin', 'WKSnapshotConfiguration', 'WKUserContentController', 'WKUserScript', 'WKWebView', 'WKWebViewConfiguration', 'WKWebpagePreferences', 'WKWebsiteDataRecord', 'WKWebsiteDataStore', 'WKWindowFeatures', '__EntityAccessibilityWrapper'}
# Names of Objective-C protocols from Apple's Cocoa / Cocoa Touch frameworks.
# Used by the Objective-C lexer to highlight these identifiers as builtins.
# NOTE: this is an auto-generated list (scraped from SDK headers) — do not
# edit entries by hand; regenerate instead.
# Fix: removed a stray leading '+' (diff artifact) that made this assignment
# a syntax error.
COCOA_PROTOCOLS = {'ABNewPersonViewControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'ABPersonViewControllerDelegate', 'ABUnknownPersonViewControllerDelegate', 'ADActionViewControllerChildInterface', 'ADActionViewControllerInterface', 'ADBannerViewDelegate', 'ADInterstitialAdDelegate', 'AEAssessmentSessionDelegate', 'ARAnchorCopying', 'ARCoachingOverlayViewDelegate', 'ARSCNViewDelegate', 'ARSKViewDelegate', 'ARSessionDelegate', 'ARSessionObserver', 'ARSessionProviding', 'ARTrackable', 'ASAccountAuthenticationModificationControllerDelegate', 'ASAccountAuthenticationModificationControllerPresentationContextProviding', 'ASAuthorizationControllerDelegate', 'ASAuthorizationControllerPresentationContextProviding', 'ASAuthorizationCredential', 'ASAuthorizationProvider', 'ASAuthorizationProviderExtensionAuthorizationRequestHandler', 'ASWebAuthenticationPresentationContextProviding', 'ASWebAuthenticationSessionRequestDelegate', 'ASWebAuthenticationSessionWebBrowserSessionHandling', 'AUAudioUnitFactory', 'AVAssetDownloadDelegate', 'AVAssetResourceLoaderDelegate', 'AVAssetWriterDelegate', 'AVAsynchronousKeyValueLoading', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'AVCaptureDataOutputSynchronizerDelegate', 'AVCaptureDepthDataOutputDelegate', 'AVCaptureFileOutputDelegate', 'AVCaptureFileOutputRecordingDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'AVCapturePhotoCaptureDelegate', 'AVCapturePhotoFileDataRepresentationCustomizer', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'AVContentKeyRecipient', 'AVContentKeySessionDelegate', 'AVFragmentMinding', 'AVPictureInPictureControllerDelegate', 'AVPlayerItemLegibleOutputPushDelegate', 'AVPlayerItemMetadataCollectorPushDelegate', 'AVPlayerItemMetadataOutputPushDelegate', 'AVPlayerItemOutputPullDelegate', 'AVPlayerItemOutputPushDelegate', 'AVPlayerViewControllerDelegate', 'AVQueuedSampleBufferRendering', 'AVRoutePickerViewDelegate', 'AVVideoCompositing', 'AVVideoCompositionInstruction', 
'AVVideoCompositionValidationHandling', 'AXCustomContentProvider', 'CAAction', 'CAAnimationDelegate', 'CALayerDelegate', 'CAMediaTiming', 'CAMetalDrawable', 'CBCentralManagerDelegate', 'CBPeripheralDelegate', 'CBPeripheralManagerDelegate', 'CHHapticAdvancedPatternPlayer', 'CHHapticDeviceCapability', 'CHHapticParameterAttributes', 'CHHapticPatternPlayer', 'CIAccordionFoldTransition', 'CIAffineClamp', 'CIAffineTile', 'CIAreaAverage', 'CIAreaHistogram', 'CIAreaMaximum', 'CIAreaMaximumAlpha', 'CIAreaMinMax', 'CIAreaMinMaxRed', 'CIAreaMinimum', 'CIAreaMinimumAlpha', 'CIAreaReductionFilter', 'CIAttributedTextImageGenerator', 'CIAztecCodeGenerator', 'CIBarcodeGenerator', 'CIBarsSwipeTransition', 'CIBicubicScaleTransform', 'CIBlendWithMask', 'CIBloom', 'CIBokehBlur', 'CIBoxBlur', 'CIBumpDistortion', 'CIBumpDistortionLinear', 'CICMYKHalftone', 'CICheckerboardGenerator', 'CICircleSplashDistortion', 'CICircularScreen', 'CICircularWrap', 'CICode128BarcodeGenerator', 'CIColorAbsoluteDifference', 'CIColorClamp', 'CIColorControls', 'CIColorCrossPolynomial', 'CIColorCube', 'CIColorCubeWithColorSpace', 'CIColorCubesMixedWithMask', 'CIColorCurves', 'CIColorInvert', 'CIColorMap', 'CIColorMatrix', 'CIColorMonochrome', 'CIColorPolynomial', 'CIColorPosterize', 'CIColorThreshold', 'CIColorThresholdOtsu', 'CIColumnAverage', 'CIComicEffect', 'CICompositeOperation', 'CIConvolution', 'CICopyMachineTransition', 'CICoreMLModel', 'CICrystallize', 'CIDepthOfField', 'CIDepthToDisparity', 'CIDiscBlur', 'CIDisintegrateWithMaskTransition', 'CIDisparityToDepth', 'CIDisplacementDistortion', 'CIDissolveTransition', 'CIDither', 'CIDocumentEnhancer', 'CIDotScreen', 'CIDroste', 'CIEdgePreserveUpsample', 'CIEdgeWork', 'CIEdges', 'CIEightfoldReflectedTile', 'CIExposureAdjust', 'CIFalseColor', 'CIFilter', 'CIFilterConstructor', 'CIFlashTransition', 'CIFourCoordinateGeometryFilter', 'CIFourfoldReflectedTile', 'CIFourfoldRotatedTile', 'CIFourfoldTranslatedTile', 'CIGaborGradients', 'CIGammaAdjust', 
'CIGaussianBlur', 'CIGaussianGradient', 'CIGlassDistortion', 'CIGlassLozenge', 'CIGlideReflectedTile', 'CIGloom', 'CIHatchedScreen', 'CIHeightFieldFromMask', 'CIHexagonalPixellate', 'CIHighlightShadowAdjust', 'CIHistogramDisplay', 'CIHoleDistortion', 'CIHueAdjust', 'CIHueSaturationValueGradient', 'CIImageProcessorInput', 'CIImageProcessorOutput', 'CIKMeans', 'CIKaleidoscope', 'CIKeystoneCorrectionCombined', 'CIKeystoneCorrectionHorizontal', 'CIKeystoneCorrectionVertical', 'CILabDeltaE', 'CILanczosScaleTransform', 'CILenticularHaloGenerator', 'CILightTunnel', 'CILineOverlay', 'CILineScreen', 'CILinearGradient', 'CILinearToSRGBToneCurve', 'CIMaskToAlpha', 'CIMaskedVariableBlur', 'CIMaximumComponent', 'CIMedian', 'CIMeshGenerator', 'CIMinimumComponent', 'CIMix', 'CIModTransition', 'CIMorphologyGradient', 'CIMorphologyMaximum', 'CIMorphologyMinimum', 'CIMorphologyRectangleMaximum', 'CIMorphologyRectangleMinimum', 'CIMotionBlur', 'CINinePartStretched', 'CINinePartTiled', 'CINoiseReduction', 'CIOpTile', 'CIPDF417BarcodeGenerator', 'CIPageCurlTransition', 'CIPageCurlWithShadowTransition', 'CIPaletteCentroid', 'CIPalettize', 'CIParallelogramTile', 'CIPerspectiveCorrection', 'CIPerspectiveRotate', 'CIPerspectiveTile', 'CIPerspectiveTransform', 'CIPerspectiveTransformWithExtent', 'CIPhotoEffect', 'CIPinchDistortion', 'CIPixellate', 'CIPlugInRegistration', 'CIPointillize', 'CIQRCodeGenerator', 'CIRadialGradient', 'CIRandomGenerator', 'CIRippleTransition', 'CIRoundedRectangleGenerator', 'CIRowAverage', 'CISRGBToneCurveToLinear', 'CISaliencyMap', 'CISepiaTone', 'CIShadedMaterial', 'CISharpenLuminance', 'CISixfoldReflectedTile', 'CISixfoldRotatedTile', 'CISmoothLinearGradient', 'CISpotColor', 'CISpotLight', 'CIStarShineGenerator', 'CIStraighten', 'CIStretchCrop', 'CIStripesGenerator', 'CISunbeamsGenerator', 'CISwipeTransition', 'CITemperatureAndTint', 'CITextImageGenerator', 'CIThermal', 'CIToneCurve', 'CITorusLensDistortion', 'CITransitionFilter', 'CITriangleKaleidoscope', 
'CITriangleTile', 'CITwelvefoldReflectedTile', 'CITwirlDistortion', 'CIUnsharpMask', 'CIVibrance', 'CIVignette', 'CIVignetteEffect', 'CIVortexDistortion', 'CIWhitePointAdjust', 'CIXRay', 'CIZoomBlur', 'CKRecordKeyValueSetting', 'CKRecordValue', 'CLKComplicationDataSource', 'CLLocationManagerDelegate', 'CLSContextProvider', 'CLSDataStoreDelegate', 'CMFallDetectionDelegate', 'CMHeadphoneMotionManagerDelegate', 'CNChangeHistoryEventVisitor', 'CNContactPickerDelegate', 'CNContactViewControllerDelegate', 'CNKeyDescriptor', 'CPApplicationDelegate', 'CPBarButtonProviding', 'CPInterfaceControllerDelegate', 'CPListTemplateDelegate', 'CPListTemplateItem', 'CPMapTemplateDelegate', 'CPNowPlayingTemplateObserver', 'CPPointOfInterestTemplateDelegate', 'CPSearchTemplateDelegate', 'CPSelectableListItem', 'CPSessionConfigurationDelegate', 'CPTabBarTemplateDelegate', 'CPTemplateApplicationDashboardSceneDelegate', 'CPTemplateApplicationSceneDelegate', 'CSSearchableIndexDelegate', 'CTSubscriberDelegate', 'CTTelephonyNetworkInfoDelegate', 'CXCallDirectoryExtensionContextDelegate', 'CXCallObserverDelegate', 'CXProviderDelegate', 'EAAccessoryDelegate', 'EAGLDrawable', 'EAWiFiUnconfiguredAccessoryBrowserDelegate', 'EKCalendarChooserDelegate', 'EKEventEditViewDelegate', 'EKEventViewDelegate', 'GCDevice', 'GKAchievementViewControllerDelegate', 'GKAgentDelegate', 'GKChallengeEventHandlerDelegate', 'GKChallengeListener', 'GKFriendRequestComposeViewControllerDelegate', 'GKGameCenterControllerDelegate', 'GKGameModel', 'GKGameModelPlayer', 'GKGameModelUpdate', 'GKGameSessionEventListener', 'GKGameSessionSharingViewControllerDelegate', 'GKInviteEventListener', 'GKLeaderboardViewControllerDelegate', 'GKLocalPlayerListener', 'GKMatchDelegate', 'GKMatchmakerViewControllerDelegate', 'GKPeerPickerControllerDelegate', 'GKRandom', 'GKSavedGameListener', 'GKSceneRootNodeType', 'GKSessionDelegate', 'GKStrategist', 'GKTurnBasedEventListener', 'GKTurnBasedMatchmakerViewControllerDelegate', 
'GKVoiceChatClient', 'GLKNamedEffect', 'GLKViewControllerDelegate', 'GLKViewDelegate', 'HKLiveWorkoutBuilderDelegate', 'HKWorkoutSessionDelegate', 'HMAccessoryBrowserDelegate', 'HMAccessoryDelegate', 'HMCameraSnapshotControlDelegate', 'HMCameraStreamControlDelegate', 'HMHomeDelegate', 'HMHomeManagerDelegate', 'HMNetworkConfigurationProfileDelegate', 'ICCameraDeviceDelegate', 'ICCameraDeviceDownloadDelegate', 'ICDeviceBrowserDelegate', 'ICDeviceDelegate', 'ICScannerDeviceDelegate', 'ILMessageFilterQueryHandling', 'INActivateCarSignalIntentHandling', 'INAddMediaIntentHandling', 'INAddTasksIntentHandling', 'INAppendToNoteIntentHandling', 'INBookRestaurantReservationIntentHandling', 'INCallsDomainHandling', 'INCancelRideIntentHandling', 'INCancelWorkoutIntentHandling', 'INCarCommandsDomainHandling', 'INCarPlayDomainHandling', 'INCreateNoteIntentHandling', 'INCreateTaskListIntentHandling', 'INDeleteTasksIntentHandling', 'INEndWorkoutIntentHandling', 'INGetAvailableRestaurantReservationBookingDefaultsIntentHandling', 'INGetAvailableRestaurantReservationBookingsIntentHandling', 'INGetCarLockStatusIntentHandling', 'INGetCarPowerLevelStatusIntentHandling', 'INGetCarPowerLevelStatusIntentResponseObserver', 'INGetRestaurantGuestIntentHandling', 'INGetRideStatusIntentHandling', 'INGetRideStatusIntentResponseObserver', 'INGetUserCurrentRestaurantReservationBookingsIntentHandling', 'INGetVisualCodeIntentHandling', 'INIntentHandlerProviding', 'INListCarsIntentHandling', 'INListRideOptionsIntentHandling', 'INMessagesDomainHandling', 'INNotebookDomainHandling', 'INPauseWorkoutIntentHandling', 'INPayBillIntentHandling', 'INPaymentsDomainHandling', 'INPhotosDomainHandling', 'INPlayMediaIntentHandling', 'INRadioDomainHandling', 'INRequestPaymentIntentHandling', 'INRequestRideIntentHandling', 'INResumeWorkoutIntentHandling', 'INRidesharingDomainHandling', 'INSaveProfileInCarIntentHandling', 'INSearchCallHistoryIntentHandling', 'INSearchForAccountsIntentHandling', 
'INSearchForBillsIntentHandling', 'INSearchForMediaIntentHandling', 'INSearchForMessagesIntentHandling', 'INSearchForNotebookItemsIntentHandling', 'INSearchForPhotosIntentHandling', 'INSendMessageIntentHandling', 'INSendPaymentIntentHandling', 'INSendRideFeedbackIntentHandling', 'INSetAudioSourceInCarIntentHandling', 'INSetCarLockStatusIntentHandling', 'INSetClimateSettingsInCarIntentHandling', 'INSetDefrosterSettingsInCarIntentHandling', 'INSetMessageAttributeIntentHandling', 'INSetProfileInCarIntentHandling', 'INSetRadioStationIntentHandling', 'INSetSeatSettingsInCarIntentHandling', 'INSetTaskAttributeIntentHandling', 'INSnoozeTasksIntentHandling', 'INSpeakable', 'INStartAudioCallIntentHandling', 'INStartCallIntentHandling', 'INStartPhotoPlaybackIntentHandling', 'INStartVideoCallIntentHandling', 'INStartWorkoutIntentHandling', 'INTransferMoneyIntentHandling', 'INUIAddVoiceShortcutButtonDelegate', 'INUIAddVoiceShortcutViewControllerDelegate', 'INUIEditVoiceShortcutViewControllerDelegate', 'INUIHostedViewControlling', 'INUIHostedViewSiriProviding', 'INUpdateMediaAffinityIntentHandling', 'INVisualCodeDomainHandling', 'INWorkoutsDomainHandling', 'JSExport', 'MCAdvertiserAssistantDelegate', 'MCBrowserViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'MCNearbyServiceBrowserDelegate', 'MCSessionDelegate', 'MDLAssetResolver', 'MDLComponent', 'MDLJointAnimation', 'MDLLightProbeIrradianceDataSource', 'MDLMeshBuffer', 'MDLMeshBufferAllocator', 'MDLMeshBufferZone', 'MDLNamed', 'MDLObjectContainerComponent', 'MDLTransformComponent', 'MDLTransformOp', 'MFMailComposeViewControllerDelegate', 'MFMessageComposeViewControllerDelegate', 'MIDICIProfileResponderDelegate', 'MKAnnotation', 'MKGeoJSONObject', 'MKLocalSearchCompleterDelegate', 'MKMapViewDelegate', 'MKOverlay', 'MKReverseGeocoderDelegate', 'MLBatchProvider', 'MLCustomLayer', 'MLCustomModel', 'MLFeatureProvider', 'MLWritable', 'MPMediaPickerControllerDelegate', 'MPMediaPlayback', 'MPNowPlayingSessionDelegate', 
'MPPlayableContentDataSource', 'MPPlayableContentDelegate', 'MPSystemMusicPlayerController', 'MSAuthenticationPresentationContext', 'MSMessagesAppTranscriptPresentation', 'MSStickerBrowserViewDataSource', 'MTKViewDelegate', 'MTLAccelerationStructure', 'MTLAccelerationStructureCommandEncoder', 'MTLArgumentEncoder', 'MTLBinaryArchive', 'MTLBlitCommandEncoder', 'MTLBuffer', 'MTLCaptureScope', 'MTLCommandBuffer', 'MTLCommandBufferEncoderInfo', 'MTLCommandEncoder', 'MTLCommandQueue', 'MTLComputeCommandEncoder', 'MTLComputePipelineState', 'MTLCounter', 'MTLCounterSampleBuffer', 'MTLCounterSet', 'MTLDepthStencilState', 'MTLDevice', 'MTLDrawable', 'MTLDynamicLibrary', 'MTLEvent', 'MTLFence', 'MTLFunction', 'MTLFunctionHandle', 'MTLFunctionLog', 'MTLFunctionLogDebugLocation', 'MTLHeap', 'MTLIndirectCommandBuffer', 'MTLIndirectComputeCommand', 'MTLIndirectComputeCommandEncoder', 'MTLIndirectRenderCommand', 'MTLIndirectRenderCommandEncoder', 'MTLIntersectionFunctionTable', 'MTLLibrary', 'MTLLogContainer', 'MTLParallelRenderCommandEncoder', 'MTLRasterizationRateMap', 'MTLRenderCommandEncoder', 'MTLRenderPipelineState', 'MTLResource', 'MTLResourceStateCommandEncoder', 'MTLSamplerState', 'MTLSharedEvent', 'MTLTexture', 'MTLVisibleFunctionTable', 'MXMetricManagerSubscriber', 'MyClassJavaScriptMethods', 'NCWidgetProviding', 'NEAppPushDelegate', 'NFCFeliCaTag', 'NFCISO15693Tag', 'NFCISO7816Tag', 'NFCMiFareTag', 'NFCNDEFReaderSessionDelegate', 'NFCNDEFTag', 'NFCReaderSession', 'NFCReaderSessionDelegate', 'NFCTag', 'NFCTagReaderSessionDelegate', 'NFCVASReaderSessionDelegate', 'NISessionDelegate', 'NSCacheDelegate', 'NSCoding', 'NSCollectionLayoutContainer', 'NSCollectionLayoutEnvironment', 'NSCollectionLayoutVisibleItem', 'NSCopying', 'NSDecimalNumberBehaviors', 'NSDiscardableContent', 'NSExtensionRequestHandling', 'NSFastEnumeration', 'NSFetchRequestResult', 'NSFetchedResultsControllerDelegate', 'NSFetchedResultsSectionInfo', 'NSFileManagerDelegate', 'NSFilePresenter', 
'NSFileProviderChangeObserver', 'NSFileProviderEnumerationObserver', 'NSFileProviderEnumerator', 'NSFileProviderItem', 'NSFileProviderServiceSource', 'NSItemProviderReading', 'NSItemProviderWriting', 'NSKeyedArchiverDelegate', 'NSKeyedUnarchiverDelegate', 'NSLayoutManagerDelegate', 'NSLocking', 'NSMachPortDelegate', 'NSMetadataQueryDelegate', 'NSMutableCopying', 'NSNetServiceBrowserDelegate', 'NSNetServiceDelegate', 'NSPortDelegate', 'NSProgressReporting', 'NSSecureCoding', 'NSStreamDelegate', 'NSTextAttachmentContainer', 'NSTextLayoutOrientationProvider', 'NSTextStorageDelegate', 'NSURLAuthenticationChallengeSender', 'NSURLConnectionDataDelegate', 'NSURLConnectionDelegate', 'NSURLConnectionDownloadDelegate', 'NSURLProtocolClient', 'NSURLSessionDataDelegate', 'NSURLSessionDelegate', 'NSURLSessionDownloadDelegate', 'NSURLSessionStreamDelegate', 'NSURLSessionTaskDelegate', 'NSURLSessionWebSocketDelegate', 'NSUserActivityDelegate', 'NSXMLParserDelegate', 'NSXPCListenerDelegate', 'NSXPCProxyCreating', 'NWTCPConnectionAuthenticationDelegate', 'OSLogEntryFromProcess', 'OSLogEntryWithPayload', 'PDFDocumentDelegate', 'PDFViewDelegate', 'PHContentEditingController', 'PHLivePhotoFrame', 'PHLivePhotoViewDelegate', 'PHPhotoLibraryAvailabilityObserver', 'PHPhotoLibraryChangeObserver', 'PHPickerViewControllerDelegate', 'PKAddPassesViewControllerDelegate', 'PKAddPaymentPassViewControllerDelegate', 'PKAddSecureElementPassViewControllerDelegate', 'PKCanvasViewDelegate', 'PKDisbursementAuthorizationControllerDelegate', 'PKIssuerProvisioningExtensionAuthorizationProviding', 'PKPaymentAuthorizationControllerDelegate', 'PKPaymentAuthorizationViewControllerDelegate', 'PKPaymentInformationRequestHandling', 'PKPushRegistryDelegate', 'PKToolPickerObserver', 'PreviewDisplaying', 'QLPreviewControllerDataSource', 'QLPreviewControllerDelegate', 'QLPreviewItem', 'QLPreviewingController', 'RPBroadcastActivityControllerDelegate', 'RPBroadcastActivityViewControllerDelegate', 
'RPBroadcastControllerDelegate', 'RPPreviewViewControllerDelegate', 'RPScreenRecorderDelegate', 'SCNActionable', 'SCNAnimatable', 'SCNAnimation', 'SCNAvoidOccluderConstraintDelegate', 'SCNBoundingVolume', 'SCNBufferStream', 'SCNCameraControlConfiguration', 'SCNCameraControllerDelegate', 'SCNNodeRendererDelegate', 'SCNPhysicsContactDelegate', 'SCNProgramDelegate', 'SCNSceneExportDelegate', 'SCNSceneRenderer', 'SCNSceneRendererDelegate', 'SCNShadable', 'SCNTechniqueSupport', 'SFSafariViewControllerDelegate', 'SFSpeechRecognitionTaskDelegate', 'SFSpeechRecognizerDelegate', 'SKCloudServiceSetupViewControllerDelegate', 'SKOverlayDelegate', 'SKPaymentQueueDelegate', 'SKPaymentTransactionObserver', 'SKPhysicsContactDelegate', 'SKProductsRequestDelegate', 'SKRequestDelegate', 'SKSceneDelegate', 'SKStoreProductViewControllerDelegate', 'SKViewDelegate', 'SKWarpable', 'SNRequest', 'SNResult', 'SNResultsObserving', 'SRSensorReaderDelegate', 'TKSmartCardTokenDriverDelegate', 'TKSmartCardUserInteractionDelegate', 'TKTokenDelegate', 'TKTokenDriverDelegate', 'TKTokenSessionDelegate', 'UIAccelerometerDelegate', 'UIAccessibilityContainerDataTable', 'UIAccessibilityContainerDataTableCell', 'UIAccessibilityContentSizeCategoryImageAdjusting', 'UIAccessibilityIdentification', 'UIAccessibilityReadingContent', 'UIActionSheetDelegate', 'UIActivityItemSource', 'UIActivityItemsConfigurationReading', 'UIAdaptivePresentationControllerDelegate', 'UIAlertViewDelegate', 'UIAppearance', 'UIAppearanceContainer', 'UIApplicationDelegate', 'UIBarPositioning', 'UIBarPositioningDelegate', 'UICloudSharingControllerDelegate', 'UICollectionViewDataSource', 'UICollectionViewDataSourcePrefetching', 'UICollectionViewDelegate', 'UICollectionViewDelegateFlowLayout', 'UICollectionViewDragDelegate', 'UICollectionViewDropCoordinator', 'UICollectionViewDropDelegate', 'UICollectionViewDropItem', 'UICollectionViewDropPlaceholderContext', 'UICollisionBehaviorDelegate', 'UIColorPickerViewControllerDelegate', 
'UIConfigurationState', 'UIContentConfiguration', 'UIContentContainer', 'UIContentSizeCategoryAdjusting', 'UIContentView', 'UIContextMenuInteractionAnimating', 'UIContextMenuInteractionCommitAnimating', 'UIContextMenuInteractionDelegate', 'UICoordinateSpace', 'UIDataSourceModelAssociation', 'UIDataSourceTranslating', 'UIDocumentBrowserViewControllerDelegate', 'UIDocumentInteractionControllerDelegate', 'UIDocumentMenuDelegate', 'UIDocumentPickerDelegate', 'UIDragAnimating', 'UIDragDropSession', 'UIDragInteractionDelegate', 'UIDragSession', 'UIDropInteractionDelegate', 'UIDropSession', 'UIDynamicAnimatorDelegate', 'UIDynamicItem', 'UIFocusAnimationContext', 'UIFocusDebuggerOutput', 'UIFocusEnvironment', 'UIFocusItem', 'UIFocusItemContainer', 'UIFocusItemScrollableContainer', 'UIFontPickerViewControllerDelegate', 'UIGestureRecognizerDelegate', 'UIGuidedAccessRestrictionDelegate', 'UIImageConfiguration', 'UIImagePickerControllerDelegate', 'UIIndirectScribbleInteractionDelegate', 'UIInputViewAudioFeedback', 'UIInteraction', 'UIItemProviderPresentationSizeProviding', 'UIKeyInput', 'UILargeContentViewerInteractionDelegate', 'UILargeContentViewerItem', 'UILayoutSupport', 'UIMenuBuilder', 'UINavigationBarDelegate', 'UINavigationControllerDelegate', 'UIObjectRestoration', 'UIPageViewControllerDataSource', 'UIPageViewControllerDelegate', 'UIPasteConfigurationSupporting', 'UIPencilInteractionDelegate', 'UIPickerViewAccessibilityDelegate', 'UIPickerViewDataSource', 'UIPickerViewDelegate', 'UIPointerInteractionAnimating', 'UIPointerInteractionDelegate', 'UIPopoverBackgroundViewMethods', 'UIPopoverControllerDelegate', 'UIPopoverPresentationControllerDelegate', 'UIPreviewActionItem', 'UIPreviewInteractionDelegate', 'UIPrintInteractionControllerDelegate', 'UIPrinterPickerControllerDelegate', 'UIResponderStandardEditActions', 'UISceneDelegate', 'UIScreenshotServiceDelegate', 'UIScribbleInteractionDelegate', 'UIScrollViewAccessibilityDelegate', 'UIScrollViewDelegate', 
'UISearchBarDelegate', 'UISearchControllerDelegate', 'UISearchDisplayDelegate', 'UISearchResultsUpdating', 'UISearchSuggestion', 'UISearchTextFieldDelegate', 'UISearchTextFieldPasteItem', 'UISplitViewControllerDelegate', 'UISpringLoadedInteractionBehavior', 'UISpringLoadedInteractionContext', 'UISpringLoadedInteractionEffect', 'UISpringLoadedInteractionSupporting', 'UIStateRestoring', 'UITabBarControllerDelegate', 'UITabBarDelegate', 'UITableViewDataSource', 'UITableViewDataSourcePrefetching', 'UITableViewDelegate', 'UITableViewDragDelegate', 'UITableViewDropCoordinator', 'UITableViewDropDelegate', 'UITableViewDropItem', 'UITableViewDropPlaceholderContext', 'UITextDocumentProxy', 'UITextDragDelegate', 'UITextDragRequest', 'UITextDraggable', 'UITextDropDelegate', 'UITextDropRequest', 'UITextDroppable', 'UITextFieldDelegate', 'UITextFormattingCoordinatorDelegate', 'UITextInput', 'UITextInputDelegate', 'UITextInputTokenizer', 'UITextInputTraits', 'UITextInteractionDelegate', 'UITextPasteConfigurationSupporting', 'UITextPasteDelegate', 'UITextPasteItem', 'UITextSelecting', 'UITextViewDelegate', 'UITimingCurveProvider', 'UIToolbarDelegate', 'UITraitEnvironment', 'UIUserActivityRestoring', 'UIVideoEditorControllerDelegate', 'UIViewAnimating', 'UIViewControllerAnimatedTransitioning', 'UIViewControllerContextTransitioning', 'UIViewControllerInteractiveTransitioning', 'UIViewControllerPreviewing', 'UIViewControllerPreviewingDelegate', 'UIViewControllerRestoration', 'UIViewControllerTransitionCoordinator', 'UIViewControllerTransitionCoordinatorContext', 'UIViewControllerTransitioningDelegate', 'UIViewImplicitlyAnimating', 'UIWebViewDelegate', 'UIWindowSceneDelegate', 'UNNotificationContentExtension', 'UNUserNotificationCenterDelegate', 'VNDocumentCameraViewControllerDelegate', 'VNFaceObservationAccepting', 'VNRequestProgressProviding', 'VNRequestRevisionProviding', 'VSAccountManagerDelegate', 'WCSessionDelegate', 'WKHTTPCookieStoreObserver', 'WKNavigationDelegate', 
'WKPreviewActionItem', 'WKScriptMessageHandler', 'WKScriptMessageHandlerWithReply', 'WKUIDelegate', 'WKURLSchemeHandler', 'WKURLSchemeTask'}
+COCOA_PRIMITIVES = {'ACErrorCode', 'ALCcontext_struct', 'ALCdevice_struct', 'ALMXGlyphEntry', 'ALMXHeader', 'API_UNAVAILABLE', 'AUChannelInfo', 'AUDependentParameter', 'AUDistanceAttenuationData', 'AUHostIdentifier', 'AUHostVersionIdentifier', 'AUInputSamplesInOutputCallbackStruct', 'AUMIDIEvent', 'AUMIDIOutputCallbackStruct', 'AUNodeInteraction', 'AUNodeRenderCallback', 'AUNumVersion', 'AUParameterAutomationEvent', 'AUParameterEvent', 'AUParameterMIDIMapping', 'AUPreset', 'AUPresetEvent', 'AURecordedParameterEvent', 'AURenderCallbackStruct', 'AURenderEventHeader', 'AUSamplerBankPresetData', 'AUSamplerInstrumentData', 'AnchorPoint', 'AnchorPointTable', 'AnkrTable', 'AudioBalanceFade', 'AudioBuffer', 'AudioBufferList', 'AudioBytePacketTranslation', 'AudioChannelDescription', 'AudioChannelLayout', 'AudioClassDescription', 'AudioCodecMagicCookieInfo', 'AudioCodecPrimeInfo', 'AudioComponentDescription', 'AudioComponentPlugInInterface', 'AudioConverterPrimeInfo', 'AudioFileMarker', 'AudioFileMarkerList', 'AudioFilePacketTableInfo', 'AudioFileRegion', 'AudioFileRegionList', 'AudioFileTypeAndFormatID', 'AudioFile_SMPTE_Time', 'AudioFormatInfo', 'AudioFormatListItem', 'AudioFramePacketTranslation', 'AudioIndependentPacketTranslation', 'AudioOutputUnitMIDICallbacks', 'AudioOutputUnitStartAtTimeParams', 'AudioPacketDependencyInfoTranslation', 'AudioPacketRangeByteCountTranslation', 'AudioPacketRollDistanceTranslation', 'AudioPanningInfo', 'AudioQueueBuffer', 'AudioQueueChannelAssignment', 'AudioQueueLevelMeterState', 'AudioQueueParameterEvent', 'AudioStreamBasicDescription', 'AudioStreamPacketDescription', 'AudioTimeStamp', 'AudioUnitCocoaViewInfo', 'AudioUnitConnection', 'AudioUnitExternalBuffer', 'AudioUnitFrequencyResponseBin', 'AudioUnitMIDIControlMapping', 'AudioUnitMeterClipping', 'AudioUnitNodeConnection', 'AudioUnitOtherPluginDesc', 'AudioUnitParameter', 'AudioUnitParameterEvent', 'AudioUnitParameterHistoryInfo', 'AudioUnitParameterInfo', 
'AudioUnitParameterNameInfo', 'AudioUnitParameterStringFromValue', 'AudioUnitParameterValueFromString', 'AudioUnitParameterValueName', 'AudioUnitParameterValueTranslation', 'AudioUnitPresetMAS_SettingData', 'AudioUnitPresetMAS_Settings', 'AudioUnitProperty', 'AudioUnitRenderContext', 'AudioValueRange', 'AudioValueTranslation', 'AuthorizationOpaqueRef', 'BslnFormat0Part', 'BslnFormat1Part', 'BslnFormat2Part', 'BslnFormat3Part', 'BslnTable', 'CABarBeatTime', 'CAFAudioDescription', 'CAFChunkHeader', 'CAFDataChunk', 'CAFFileHeader', 'CAFInfoStrings', 'CAFInstrumentChunk', 'CAFMarker', 'CAFMarkerChunk', 'CAFOverviewChunk', 'CAFOverviewSample', 'CAFPacketTableHeader', 'CAFPeakChunk', 'CAFPositionPeak', 'CAFRegion', 'CAFRegionChunk', 'CAFStringID', 'CAFStrings', 'CAFUMIDChunk', 'CAF_SMPTE_Time', 'CAF_UUID_ChunkHeader', 'CA_BOXABLE', 'CFHostClientContext', 'CFNetServiceClientContext', 'CF_BRIDGED_MUTABLE_TYPE', 'CF_BRIDGED_TYPE', 'CF_RELATED_TYPE', 'CGAffineTransform', 'CGDataConsumerCallbacks', 'CGDataProviderDirectCallbacks', 'CGDataProviderSequentialCallbacks', 'CGFunctionCallbacks', 'CGPDFArray', 'CGPDFContentStream', 'CGPDFDictionary', 'CGPDFObject', 'CGPDFOperatorTable', 'CGPDFScanner', 'CGPDFStream', 'CGPDFString', 'CGPathElement', 'CGPatternCallbacks', 'CGVector', 'CG_BOXABLE', 'CLLocationCoordinate2D', 'CM_BRIDGED_TYPE', 'CTParagraphStyleSetting', 'CVPlanarComponentInfo', 'CVPlanarPixelBufferInfo', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'CVPlanarPixelBufferInfo_YCbCrPlanar', 'CVSMPTETime', 'CV_BRIDGED_TYPE', 'ComponentInstanceRecord', 'ExtendedAudioFormatInfo', 'ExtendedControlEvent', 'ExtendedNoteOnEvent', 'ExtendedTempoEvent', 'FontVariation', 'GCQuaternion', 'GKBox', 'GKQuad', 'GKTriangle', 'GLKEffectPropertyPrv', 'HostCallbackInfo', 'IIO_BRIDGED_TYPE', 'IUnknownVTbl', 'JustDirectionTable', 'JustPCAction', 'JustPCActionSubrecord', 'JustPCConditionalAddAction', 'JustPCDecompositionAction', 'JustPCDuctilityAction', 'JustPCGlyphRepeatAddAction', 
'JustPostcompTable', 'JustTable', 'JustWidthDeltaEntry', 'JustWidthDeltaGroup', 'KernIndexArrayHeader', 'KernKerningPair', 'KernOffsetTable', 'KernOrderedListEntry', 'KernOrderedListHeader', 'KernSimpleArrayHeader', 'KernStateEntry', 'KernStateHeader', 'KernSubtableHeader', 'KernTableHeader', 'KernVersion0Header', 'KernVersion0SubtableHeader', 'KerxAnchorPointAction', 'KerxControlPointAction', 'KerxControlPointEntry', 'KerxControlPointHeader', 'KerxCoordinateAction', 'KerxIndexArrayHeader', 'KerxKerningPair', 'KerxOrderedListEntry', 'KerxOrderedListHeader', 'KerxSimpleArrayHeader', 'KerxStateEntry', 'KerxStateHeader', 'KerxSubtableHeader', 'KerxTableHeader', 'LcarCaretClassEntry', 'LcarCaretTable', 'LtagStringRange', 'LtagTable', 'MDL_CLASS_EXPORT', 'MIDICIDeviceIdentification', 'MIDIChannelMessage', 'MIDIControlTransform', 'MIDIDriverInterface', 'MIDIEventList', 'MIDIEventPacket', 'MIDIIOErrorNotification', 'MIDIMessage_128', 'MIDIMessage_64', 'MIDIMessage_96', 'MIDIMetaEvent', 'MIDINoteMessage', 'MIDINotification', 'MIDIObjectAddRemoveNotification', 'MIDIObjectPropertyChangeNotification', 'MIDIPacket', 'MIDIPacketList', 'MIDIRawData', 'MIDISysexSendRequest', 'MIDIThruConnectionEndpoint', 'MIDIThruConnectionParams', 'MIDITransform', 'MIDIValueMap', 'MPSDeviceOptions', 'MixerDistanceParams', 'MortChain', 'MortContextualSubtable', 'MortFeatureEntry', 'MortInsertionSubtable', 'MortLigatureSubtable', 'MortRearrangementSubtable', 'MortSubtable', 'MortSwashSubtable', 'MortTable', 'MorxChain', 'MorxContextualSubtable', 'MorxInsertionSubtable', 'MorxLigatureSubtable', 'MorxRearrangementSubtable', 'MorxSubtable', 'MorxTable', 'MusicDeviceNoteParams', 'MusicDeviceStdNoteParams', 'MusicEventUserData', 'MusicTrackLoopInfo', 'NoteParamsControlValue', 'OpaqueAudioComponent', 'OpaqueAudioComponentInstance', 'OpaqueAudioConverter', 'OpaqueAudioQueue', 'OpaqueAudioQueueProcessingTap', 'OpaqueAudioQueueTimeline', 'OpaqueExtAudioFile', 'OpaqueJSClass', 'OpaqueJSContext', 
'OpaqueJSContextGroup', 'OpaqueJSPropertyNameAccumulator', 'OpaqueJSPropertyNameArray', 'OpaqueJSString', 'OpaqueJSValue', 'OpaqueMusicEventIterator', 'OpaqueMusicPlayer', 'OpaqueMusicSequence', 'OpaqueMusicTrack', 'OpbdSideValues', 'OpbdTable', 'ParameterEvent', 'PropLookupSegment', 'PropLookupSingle', 'PropTable', 'ROTAGlyphEntry', 'ROTAHeader', 'SCNMatrix4', 'SCNVector3', 'SCNVector4', 'SFNTLookupArrayHeader', 'SFNTLookupBinarySearchHeader', 'SFNTLookupSegment', 'SFNTLookupSegmentHeader', 'SFNTLookupSingle', 'SFNTLookupSingleHeader', 'SFNTLookupTable', 'SFNTLookupTrimmedArrayHeader', 'SFNTLookupVectorHeader', 'SMPTETime', 'STClassTable', 'STEntryOne', 'STEntryTwo', 'STEntryZero', 'STHeader', 'STXEntryOne', 'STXEntryTwo', 'STXEntryZero', 'STXHeader', 'ScheduledAudioFileRegion', 'ScheduledAudioSlice', 'SecKeychainAttribute', 'SecKeychainAttributeInfo', 'SecKeychainAttributeList', 'TrakTable', 'TrakTableData', 'TrakTableEntry', 'UIAccessibility', 'VTDecompressionOutputCallbackRecord', 'VTInt32Point', 'VTInt32Size', '_CFHTTPAuthentication', '_GLKMatrix2', '_GLKMatrix3', '_GLKMatrix4', '_GLKQuaternion', '_GLKVector2', '_GLKVector3', '_GLKVector4', '_GLKVertexAttributeParameters', '_MTLAxisAlignedBoundingBox', '_MTLPackedFloat3', '_MTLPackedFloat4x3', '_NSRange', '_NSZone', '__CFHTTPMessage', '__CFHost', '__CFNetDiagnostic', '__CFNetService', '__CFNetServiceBrowser', '__CFNetServiceMonitor', '__CFXMLNode', '__CFXMLParser', '__GLsync', '__SecAccess', '__SecCertificate', '__SecIdentity', '__SecKey', '__SecRandom', '__attribute__', 'gss_OID_desc_struct', 'gss_OID_set_desc_struct', 'gss_auth_identity', 'gss_buffer_desc_struct', 'gss_buffer_set_desc_struct', 'gss_channel_bindings_struct', 'gss_cred_id_t_desc_struct', 'gss_ctx_id_t_desc_struct', 'gss_iov_buffer_desc_struct', 'gss_krb5_cfx_keydata', 'gss_krb5_lucid_context_v1', 'gss_krb5_lucid_context_version', 'gss_krb5_lucid_key', 'gss_krb5_rfc1964_keydata', 'gss_name_t_desc_struct', 'opaqueCMBufferQueueTriggerToken', 
'sfntCMapEncoding', 'sfntCMapExtendedSubHeader', 'sfntCMapHeader', 'sfntCMapSubHeader', 'sfntDescriptorHeader', 'sfntDirectory', 'sfntDirectoryEntry', 'sfntFeatureHeader', 'sfntFeatureName', 'sfntFontDescriptor', 'sfntFontFeatureSetting', 'sfntFontRunFeature', 'sfntInstance', 'sfntNameHeader', 'sfntNameRecord', 'sfntVariationAxis', 'sfntVariationHeader'}
+
if __name__ == '__main__':  # pragma: no cover
    # Regenerate the builtin-name sets above by scraping the Objective-C
    # headers shipped with the iOS SDK.  Run on macOS with Xcode installed;
    # paste the printed sets back into this module.
    import os
    import re

    # Root of the iOS SDK frameworks; adjust if Xcode lives elsewhere.
    FRAMEWORKS_PATH = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/'

    # Compile the scraping patterns once instead of per header file.
    # @interface/@protocol introduce Objective-C classes and protocols;
    # the typedef pattern covers 'typedef enum X', 'typedef struct X' and
    # 'typedef const struct X' exactly as the original three patterns did.
    INTERFACE_RE = re.compile(r'(?<=@interface )\w+')
    PROTOCOL_RE = re.compile(r'(?<=@protocol )\w+')
    PRIMITIVE_RE = re.compile(r'typedef (?:enum|(?:const )?struct) (\w+)')

    all_interfaces = set()
    all_protocols = set()
    all_primitives = set()
    for framework in os.listdir(FRAMEWORKS_PATH):
        headers_dir = os.path.join(FRAMEWORKS_PATH, framework, 'Headers')
        if not os.path.exists(headers_dir):
            continue

        for header_name in os.listdir(headers_dir):
            if not header_name.endswith('.h'):
                continue
            header_path = os.path.join(headers_dir, header_name)

            try:
                # Use a distinct name for the file object; the original
                # code shadowed the filename loop variable here.
                with open(header_path, encoding='utf-8') as header_file:
                    content = header_file.read()
            except UnicodeDecodeError:
                print(f"Decoding error for file: {header_path}")
                continue

            all_interfaces.update(INTERFACE_RE.findall(content))
            all_protocols.update(PROTOCOL_RE.findall(content))
            all_primitives.update(PRIMITIVE_RE.findall(content))

    print("ALL interfaces: \n")
    print(sorted(all_interfaces))

    print("\nALL protocols: \n")
    print(sorted(all_protocols))

    print("\nALL primitives: \n")
    print(sorted(all_primitives))
diff --git a/pygments/lexers/_csound_builtins.py b/pygments/lexers/_csound_builtins.py
new file mode 100644
index 0000000..8e1724d
--- /dev/null
+++ b/pygments/lexers/_csound_builtins.py
@@ -0,0 +1,1780 @@
+"""
+ pygments.lexers._csound_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
# Opcode names that have been removed from Csound.
REMOVED_OPCODES = {
    'OSCsendA',
    'beadsynt',
    'beosc',
    'buchla',
    'getrowlin',
    'lua_exec',
    'lua_iaopcall',
    'lua_iaopcall_off',
    'lua_ikopcall',
    'lua_ikopcall_off',
    'lua_iopcall',
    'lua_iopcall_off',
    'lua_opdef',
    'mp3scal_check',
    'mp3scal_load',
    'mp3scal_load2',
    'mp3scal_play',
    'mp3scal_play2',
    'pvsgendy',
    'socksend_k',
    'signalflowgraph',
    'sumTableFilter',
    'systime',
    'tabrowlin',
    'vbap1move',
}
+
+# Opcodes in Csound 6.18.0 using:
+# python3 -c "
+# import re
+# from subprocess import Popen, PIPE
+# output = Popen(['csound', '--list-opcodes0'], stderr=PIPE, text=True).communicate()[1]
+# opcodes = output[re.search(r'^$', output, re.M).end() : re.search(r'^\d+ opcodes$', output, re.M).start()].split()
+# output = Popen(['csound', '--list-opcodes2'], stderr=PIPE, text=True).communicate()[1]
+# all_opcodes = output[re.search(r'^$', output, re.M).end() : re.search(r'^\d+ opcodes$', output, re.M).start()].split()
+# deprecated_opcodes = [opcode for opcode in all_opcodes if opcode not in opcodes]
+# # Remove opcodes that csound.py treats as keywords.
+# keyword_opcodes = [
+# 'cggoto', # https://csound.com/docs/manual/cggoto.html
+# 'cigoto', # https://csound.com/docs/manual/cigoto.html
+# 'cingoto', # (undocumented)
+# 'ckgoto', # https://csound.com/docs/manual/ckgoto.html
+# 'cngoto', # https://csound.com/docs/manual/cngoto.html
+# 'cnkgoto', # (undocumented)
+# 'endin', # https://csound.com/docs/manual/endin.html
+# 'endop', # https://csound.com/docs/manual/endop.html
+# 'goto', # https://csound.com/docs/manual/goto.html
+# 'igoto', # https://csound.com/docs/manual/igoto.html
+# 'instr', # https://csound.com/docs/manual/instr.html
+# 'kgoto', # https://csound.com/docs/manual/kgoto.html
+# 'loop_ge', # https://csound.com/docs/manual/loop_ge.html
+# 'loop_gt', # https://csound.com/docs/manual/loop_gt.html
+# 'loop_le', # https://csound.com/docs/manual/loop_le.html
+# 'loop_lt', # https://csound.com/docs/manual/loop_lt.html
+# 'opcode', # https://csound.com/docs/manual/opcode.html
+# 'reinit', # https://csound.com/docs/manual/reinit.html
+# 'return', # https://csound.com/docs/manual/return.html
+# 'rireturn', # https://csound.com/docs/manual/rireturn.html
+# 'rigoto', # https://csound.com/docs/manual/rigoto.html
+# 'tigoto', # https://csound.com/docs/manual/tigoto.html
+# 'timout' # https://csound.com/docs/manual/timout.html
+# ]
+# opcodes = [opcode for opcode in opcodes if opcode not in keyword_opcodes]
+# newline = '\n'
+# print(f'''OPCODES = set(\'''
+# {newline.join(opcodes)}
+# \'''.split())
+#
+# DEPRECATED_OPCODES = set(\'''
+# {newline.join(deprecated_opcodes)}
+# \'''.split())
+# ''')
+# "
+
+OPCODES = set('''
+ATSadd
+ATSaddnz
+ATSbufread
+ATScross
+ATSinfo
+ATSinterpread
+ATSpartialtap
+ATSread
+ATSreadnz
+ATSsinnoi
+FLbox
+FLbutBank
+FLbutton
+FLcloseButton
+FLcolor
+FLcolor2
+FLcount
+FLexecButton
+FLgetsnap
+FLgroup
+FLgroupEnd
+FLgroup_end
+FLhide
+FLhvsBox
+FLhvsBoxSetValue
+FLjoy
+FLkeyIn
+FLknob
+FLlabel
+FLloadsnap
+FLmouse
+FLpack
+FLpackEnd
+FLpack_end
+FLpanel
+FLpanelEnd
+FLpanel_end
+FLprintk
+FLprintk2
+FLroller
+FLrun
+FLsavesnap
+FLscroll
+FLscrollEnd
+FLscroll_end
+FLsetAlign
+FLsetBox
+FLsetColor
+FLsetColor2
+FLsetFont
+FLsetPosition
+FLsetSize
+FLsetSnapGroup
+FLsetText
+FLsetTextColor
+FLsetTextSize
+FLsetTextType
+FLsetVal
+FLsetVal_i
+FLsetVali
+FLsetsnap
+FLshow
+FLslidBnk
+FLslidBnk2
+FLslidBnk2Set
+FLslidBnk2Setk
+FLslidBnkGetHandle
+FLslidBnkSet
+FLslidBnkSetk
+FLslider
+FLtabs
+FLtabsEnd
+FLtabs_end
+FLtext
+FLupdate
+FLvalue
+FLvkeybd
+FLvslidBnk
+FLvslidBnk2
+FLxyin
+JackoAudioIn
+JackoAudioInConnect
+JackoAudioOut
+JackoAudioOutConnect
+JackoFreewheel
+JackoInfo
+JackoInit
+JackoMidiInConnect
+JackoMidiOut
+JackoMidiOutConnect
+JackoNoteOut
+JackoOn
+JackoTransport
+K35_hpf
+K35_lpf
+MixerClear
+MixerGetLevel
+MixerReceive
+MixerSend
+MixerSetLevel
+MixerSetLevel_i
+OSCbundle
+OSCcount
+OSCinit
+OSCinitM
+OSClisten
+OSCraw
+OSCsend
+OSCsend_lo
+S
+STKBandedWG
+STKBeeThree
+STKBlowBotl
+STKBlowHole
+STKBowed
+STKBrass
+STKClarinet
+STKDrummer
+STKFMVoices
+STKFlute
+STKHevyMetl
+STKMandolin
+STKModalBar
+STKMoog
+STKPercFlut
+STKPlucked
+STKResonate
+STKRhodey
+STKSaxofony
+STKShakers
+STKSimple
+STKSitar
+STKStifKarp
+STKTubeBell
+STKVoicForm
+STKWhistle
+STKWurley
+a
+abs
+active
+adsr
+adsyn
+adsynt
+adsynt2
+aftouch
+allpole
+alpass
+alwayson
+ampdb
+ampdbfs
+ampmidi
+ampmidicurve
+ampmidid
+apoleparams
+arduinoRead
+arduinoReadF
+arduinoStart
+arduinoStop
+areson
+aresonk
+atone
+atonek
+atonex
+autocorr
+babo
+balance
+balance2
+bamboo
+barmodel
+bbcutm
+bbcuts
+betarand
+bexprnd
+bformdec1
+bformdec2
+bformenc1
+binit
+biquad
+biquada
+birnd
+bob
+bpf
+bpfcos
+bqrez
+butbp
+butbr
+buthp
+butlp
+butterbp
+butterbr
+butterhp
+butterlp
+button
+buzz
+c2r
+cabasa
+cauchy
+cauchyi
+cbrt
+ceil
+cell
+cent
+centroid
+ceps
+cepsinv
+chanctrl
+changed
+changed2
+chani
+chano
+chebyshevpoly
+checkbox
+chn_S
+chn_a
+chn_k
+chnclear
+chnexport
+chnget
+chngeta
+chngeti
+chngetk
+chngetks
+chngets
+chnmix
+chnparams
+chnset
+chnseta
+chnseti
+chnsetk
+chnsetks
+chnsets
+chuap
+clear
+clfilt
+clip
+clockoff
+clockon
+cmp
+cmplxprod
+cntCreate
+cntCycles
+cntDelete
+cntDelete_i
+cntRead
+cntReset
+cntState
+comb
+combinv
+compilecsd
+compileorc
+compilestr
+compress
+compress2
+connect
+control
+convle
+convolve
+copya2ftab
+copyf2array
+cos
+cosh
+cosinv
+cosseg
+cossegb
+cossegr
+count
+count_i
+cps2pch
+cpsmidi
+cpsmidib
+cpsmidinn
+cpsoct
+cpspch
+cpstmid
+cpstun
+cpstuni
+cpsxpch
+cpumeter
+cpuprc
+cross2
+crossfm
+crossfmi
+crossfmpm
+crossfmpmi
+crosspm
+crosspmi
+crunch
+ctlchn
+ctrl14
+ctrl21
+ctrl7
+ctrlinit
+ctrlpreset
+ctrlprint
+ctrlprintpresets
+ctrlsave
+ctrlselect
+cuserrnd
+dam
+date
+dates
+db
+dbamp
+dbfsamp
+dcblock
+dcblock2
+dconv
+dct
+dctinv
+deinterleave
+delay
+delay1
+delayk
+delayr
+delayw
+deltap
+deltap3
+deltapi
+deltapn
+deltapx
+deltapxw
+denorm
+diff
+diode_ladder
+directory
+diskgrain
+diskin
+diskin2
+dispfft
+display
+distort
+distort1
+divz
+doppler
+dot
+downsamp
+dripwater
+dssiactivate
+dssiaudio
+dssictls
+dssiinit
+dssilist
+dumpk
+dumpk2
+dumpk3
+dumpk4
+duserrnd
+dust
+dust2
+elapsedcycles
+elapsedtime
+envlpx
+envlpxr
+ephasor
+eqfil
+evalstr
+event
+event_i
+eventcycles
+eventtime
+exciter
+exitnow
+exp
+expcurve
+expon
+exprand
+exprandi
+expseg
+expsega
+expsegb
+expsegba
+expsegr
+fareylen
+fareyleni
+faustaudio
+faustcompile
+faustctl
+faustdsp
+faustgen
+faustplay
+fft
+fftinv
+ficlose
+filebit
+filelen
+filenchnls
+filepeak
+filescal
+filesr
+filevalid
+fillarray
+filter2
+fin
+fini
+fink
+fiopen
+flanger
+flashtxt
+flooper
+flooper2
+floor
+fluidAllOut
+fluidCCi
+fluidCCk
+fluidControl
+fluidEngine
+fluidInfo
+fluidLoad
+fluidNote
+fluidOut
+fluidProgramSelect
+fluidSetInterpMethod
+fmanal
+fmax
+fmb3
+fmbell
+fmin
+fmmetal
+fmod
+fmpercfl
+fmrhode
+fmvoice
+fmwurlie
+fof
+fof2
+fofilter
+fog
+fold
+follow
+follow2
+foscil
+foscili
+fout
+fouti
+foutir
+foutk
+fprintks
+fprints
+frac
+fractalnoise
+framebuffer
+freeverb
+ftaudio
+ftchnls
+ftconv
+ftcps
+ftexists
+ftfree
+ftgen
+ftgenonce
+ftgentmp
+ftlen
+ftload
+ftloadk
+ftlptim
+ftmorf
+ftom
+ftprint
+ftresize
+ftresizei
+ftsamplebank
+ftsave
+ftsavek
+ftset
+ftslice
+ftslicei
+ftsr
+gain
+gainslider
+gauss
+gaussi
+gausstrig
+gbuzz
+genarray
+genarray_i
+gendy
+gendyc
+gendyx
+getcfg
+getcol
+getftargs
+getrow
+getseed
+gogobel
+grain
+grain2
+grain3
+granule
+gtadsr
+gtf
+guiro
+harmon
+harmon2
+harmon3
+harmon4
+hdf5read
+hdf5write
+hilbert
+hilbert2
+hrtfearly
+hrtfmove
+hrtfmove2
+hrtfreverb
+hrtfstat
+hsboscil
+hvs1
+hvs2
+hvs3
+hypot
+i
+ihold
+imagecreate
+imagefree
+imagegetpixel
+imageload
+imagesave
+imagesetpixel
+imagesize
+in
+in32
+inch
+inh
+init
+initc14
+initc21
+initc7
+inleta
+inletf
+inletk
+inletkid
+inletv
+ino
+inq
+inrg
+ins
+insglobal
+insremot
+int
+integ
+interleave
+interp
+invalue
+inx
+inz
+jacktransport
+jitter
+jitter2
+joystick
+jspline
+k
+la_i_add_mc
+la_i_add_mr
+la_i_add_vc
+la_i_add_vr
+la_i_assign_mc
+la_i_assign_mr
+la_i_assign_t
+la_i_assign_vc
+la_i_assign_vr
+la_i_conjugate_mc
+la_i_conjugate_mr
+la_i_conjugate_vc
+la_i_conjugate_vr
+la_i_distance_vc
+la_i_distance_vr
+la_i_divide_mc
+la_i_divide_mr
+la_i_divide_vc
+la_i_divide_vr
+la_i_dot_mc
+la_i_dot_mc_vc
+la_i_dot_mr
+la_i_dot_mr_vr
+la_i_dot_vc
+la_i_dot_vr
+la_i_get_mc
+la_i_get_mr
+la_i_get_vc
+la_i_get_vr
+la_i_invert_mc
+la_i_invert_mr
+la_i_lower_solve_mc
+la_i_lower_solve_mr
+la_i_lu_det_mc
+la_i_lu_det_mr
+la_i_lu_factor_mc
+la_i_lu_factor_mr
+la_i_lu_solve_mc
+la_i_lu_solve_mr
+la_i_mc_create
+la_i_mc_set
+la_i_mr_create
+la_i_mr_set
+la_i_multiply_mc
+la_i_multiply_mr
+la_i_multiply_vc
+la_i_multiply_vr
+la_i_norm1_mc
+la_i_norm1_mr
+la_i_norm1_vc
+la_i_norm1_vr
+la_i_norm_euclid_mc
+la_i_norm_euclid_mr
+la_i_norm_euclid_vc
+la_i_norm_euclid_vr
+la_i_norm_inf_mc
+la_i_norm_inf_mr
+la_i_norm_inf_vc
+la_i_norm_inf_vr
+la_i_norm_max_mc
+la_i_norm_max_mr
+la_i_print_mc
+la_i_print_mr
+la_i_print_vc
+la_i_print_vr
+la_i_qr_eigen_mc
+la_i_qr_eigen_mr
+la_i_qr_factor_mc
+la_i_qr_factor_mr
+la_i_qr_sym_eigen_mc
+la_i_qr_sym_eigen_mr
+la_i_random_mc
+la_i_random_mr
+la_i_random_vc
+la_i_random_vr
+la_i_size_mc
+la_i_size_mr
+la_i_size_vc
+la_i_size_vr
+la_i_subtract_mc
+la_i_subtract_mr
+la_i_subtract_vc
+la_i_subtract_vr
+la_i_t_assign
+la_i_trace_mc
+la_i_trace_mr
+la_i_transpose_mc
+la_i_transpose_mr
+la_i_upper_solve_mc
+la_i_upper_solve_mr
+la_i_vc_create
+la_i_vc_set
+la_i_vr_create
+la_i_vr_set
+la_k_a_assign
+la_k_add_mc
+la_k_add_mr
+la_k_add_vc
+la_k_add_vr
+la_k_assign_a
+la_k_assign_f
+la_k_assign_mc
+la_k_assign_mr
+la_k_assign_t
+la_k_assign_vc
+la_k_assign_vr
+la_k_conjugate_mc
+la_k_conjugate_mr
+la_k_conjugate_vc
+la_k_conjugate_vr
+la_k_current_f
+la_k_current_vr
+la_k_distance_vc
+la_k_distance_vr
+la_k_divide_mc
+la_k_divide_mr
+la_k_divide_vc
+la_k_divide_vr
+la_k_dot_mc
+la_k_dot_mc_vc
+la_k_dot_mr
+la_k_dot_mr_vr
+la_k_dot_vc
+la_k_dot_vr
+la_k_f_assign
+la_k_get_mc
+la_k_get_mr
+la_k_get_vc
+la_k_get_vr
+la_k_invert_mc
+la_k_invert_mr
+la_k_lower_solve_mc
+la_k_lower_solve_mr
+la_k_lu_det_mc
+la_k_lu_det_mr
+la_k_lu_factor_mc
+la_k_lu_factor_mr
+la_k_lu_solve_mc
+la_k_lu_solve_mr
+la_k_mc_set
+la_k_mr_set
+la_k_multiply_mc
+la_k_multiply_mr
+la_k_multiply_vc
+la_k_multiply_vr
+la_k_norm1_mc
+la_k_norm1_mr
+la_k_norm1_vc
+la_k_norm1_vr
+la_k_norm_euclid_mc
+la_k_norm_euclid_mr
+la_k_norm_euclid_vc
+la_k_norm_euclid_vr
+la_k_norm_inf_mc
+la_k_norm_inf_mr
+la_k_norm_inf_vc
+la_k_norm_inf_vr
+la_k_norm_max_mc
+la_k_norm_max_mr
+la_k_qr_eigen_mc
+la_k_qr_eigen_mr
+la_k_qr_factor_mc
+la_k_qr_factor_mr
+la_k_qr_sym_eigen_mc
+la_k_qr_sym_eigen_mr
+la_k_random_mc
+la_k_random_mr
+la_k_random_vc
+la_k_random_vr
+la_k_subtract_mc
+la_k_subtract_mr
+la_k_subtract_vc
+la_k_subtract_vr
+la_k_t_assign
+la_k_trace_mc
+la_k_trace_mr
+la_k_upper_solve_mc
+la_k_upper_solve_mr
+la_k_vc_set
+la_k_vr_set
+lag
+lagud
+lastcycle
+lenarray
+lfo
+lfsr
+limit
+limit1
+lincos
+line
+linen
+linenr
+lineto
+link_beat_force
+link_beat_get
+link_beat_request
+link_create
+link_enable
+link_is_enabled
+link_metro
+link_peers
+link_tempo_get
+link_tempo_set
+linlin
+linrand
+linseg
+linsegb
+linsegr
+liveconv
+locsend
+locsig
+log
+log10
+log2
+logbtwo
+logcurve
+loopseg
+loopsegp
+looptseg
+loopxseg
+lorenz
+loscil
+loscil3
+loscil3phs
+loscilphs
+loscilx
+lowpass2
+lowres
+lowresx
+lpcanal
+lpcfilter
+lpf18
+lpform
+lpfreson
+lphasor
+lpinterp
+lposcil
+lposcil3
+lposcila
+lposcilsa
+lposcilsa2
+lpread
+lpreson
+lpshold
+lpsholdp
+lpslot
+lufs
+mac
+maca
+madsr
+mags
+mandel
+mandol
+maparray
+maparray_i
+marimba
+massign
+max
+max_k
+maxabs
+maxabsaccum
+maxaccum
+maxalloc
+maxarray
+mclock
+mdelay
+median
+mediank
+metro
+metro2
+metrobpm
+mfb
+midglobal
+midiarp
+midic14
+midic21
+midic7
+midichannelaftertouch
+midichn
+midicontrolchange
+midictrl
+mididefault
+midifilestatus
+midiin
+midinoteoff
+midinoteoncps
+midinoteonkey
+midinoteonoct
+midinoteonpch
+midion
+midion2
+midiout
+midiout_i
+midipgm
+midipitchbend
+midipolyaftertouch
+midiprogramchange
+miditempo
+midremot
+min
+minabs
+minabsaccum
+minaccum
+minarray
+mincer
+mirror
+mode
+modmatrix
+monitor
+moog
+moogladder
+moogladder2
+moogvcf
+moogvcf2
+moscil
+mp3bitrate
+mp3in
+mp3len
+mp3nchnls
+mp3out
+mp3scal
+mp3sr
+mpulse
+mrtmsg
+ms2st
+mtof
+mton
+multitap
+mute
+mvchpf
+mvclpf1
+mvclpf2
+mvclpf3
+mvclpf4
+mvmfilter
+mxadsr
+nchnls_hw
+nestedap
+nlalp
+nlfilt
+nlfilt2
+noise
+noteoff
+noteon
+noteondur
+noteondur2
+notnum
+nreverb
+nrpn
+nsamp
+nstance
+nstrnum
+nstrstr
+ntof
+ntom
+ntrpol
+nxtpow2
+octave
+octcps
+octmidi
+octmidib
+octmidinn
+octpch
+olabuffer
+oscbnk
+oscil
+oscil1
+oscil1i
+oscil3
+oscili
+oscilikt
+osciliktp
+oscilikts
+osciln
+oscils
+oscilx
+out
+out32
+outall
+outc
+outch
+outh
+outiat
+outic
+outic14
+outipat
+outipb
+outipc
+outkat
+outkc
+outkc14
+outkpat
+outkpb
+outkpc
+outleta
+outletf
+outletk
+outletkid
+outletv
+outo
+outq
+outq1
+outq2
+outq3
+outq4
+outrg
+outs
+outs1
+outs2
+outvalue
+outx
+outz
+p
+p5gconnect
+p5gdata
+pan
+pan2
+pareq
+part2txt
+partials
+partikkel
+partikkelget
+partikkelset
+partikkelsync
+passign
+paulstretch
+pcauchy
+pchbend
+pchmidi
+pchmidib
+pchmidinn
+pchoct
+pchtom
+pconvolve
+pcount
+pdclip
+pdhalf
+pdhalfy
+peak
+pgmassign
+pgmchn
+phaser1
+phaser2
+phasor
+phasorbnk
+phs
+pindex
+pinker
+pinkish
+pitch
+pitchac
+pitchamdf
+planet
+platerev
+plltrack
+pluck
+poisson
+pol2rect
+polyaft
+polynomial
+port
+portk
+poscil
+poscil3
+pow
+powershape
+powoftwo
+pows
+prealloc
+prepiano
+print
+print_type
+printarray
+printf
+printf_i
+printk
+printk2
+printks
+printks2
+println
+prints
+printsk
+product
+pset
+ptablew
+ptrack
+puts
+pvadd
+pvbufread
+pvcross
+pvinterp
+pvoc
+pvread
+pvs2array
+pvs2tab
+pvsadsyn
+pvsanal
+pvsarp
+pvsbandp
+pvsbandr
+pvsbandwidth
+pvsbin
+pvsblur
+pvsbuffer
+pvsbufread
+pvsbufread2
+pvscale
+pvscent
+pvsceps
+pvscfs
+pvscross
+pvsdemix
+pvsdiskin
+pvsdisp
+pvsenvftw
+pvsfilter
+pvsfread
+pvsfreeze
+pvsfromarray
+pvsftr
+pvsftw
+pvsfwrite
+pvsgain
+pvsgendy
+pvshift
+pvsifd
+pvsin
+pvsinfo
+pvsinit
+pvslock
+pvslpc
+pvsmaska
+pvsmix
+pvsmooth
+pvsmorph
+pvsosc
+pvsout
+pvspitch
+pvstanal
+pvstencil
+pvstrace
+pvsvoc
+pvswarp
+pvsynth
+pwd
+pyassign
+pyassigni
+pyassignt
+pycall
+pycall1
+pycall1i
+pycall1t
+pycall2
+pycall2i
+pycall2t
+pycall3
+pycall3i
+pycall3t
+pycall4
+pycall4i
+pycall4t
+pycall5
+pycall5i
+pycall5t
+pycall6
+pycall6i
+pycall6t
+pycall7
+pycall7i
+pycall7t
+pycall8
+pycall8i
+pycall8t
+pycalli
+pycalln
+pycallni
+pycallt
+pyeval
+pyevali
+pyevalt
+pyexec
+pyexeci
+pyexect
+pyinit
+pylassign
+pylassigni
+pylassignt
+pylcall
+pylcall1
+pylcall1i
+pylcall1t
+pylcall2
+pylcall2i
+pylcall2t
+pylcall3
+pylcall3i
+pylcall3t
+pylcall4
+pylcall4i
+pylcall4t
+pylcall5
+pylcall5i
+pylcall5t
+pylcall6
+pylcall6i
+pylcall6t
+pylcall7
+pylcall7i
+pylcall7t
+pylcall8
+pylcall8i
+pylcall8t
+pylcalli
+pylcalln
+pylcallni
+pylcallt
+pyleval
+pylevali
+pylevalt
+pylexec
+pylexeci
+pylexect
+pylrun
+pylruni
+pylrunt
+pyrun
+pyruni
+pyrunt
+qinf
+qnan
+r2c
+rand
+randc
+randh
+randi
+random
+randomh
+randomi
+rbjeq
+readclock
+readf
+readfi
+readk
+readk2
+readk3
+readk4
+readks
+readscore
+readscratch
+rect2pol
+release
+remoteport
+remove
+repluck
+reshapearray
+reson
+resonbnk
+resonk
+resonr
+resonx
+resonxk
+resony
+resonz
+resyn
+reverb
+reverb2
+reverbsc
+rewindscore
+rezzy
+rfft
+rifft
+rms
+rnd
+rnd31
+rndseed
+round
+rspline
+rtclock
+s16b14
+s32b14
+samphold
+sandpaper
+sc_lag
+sc_lagud
+sc_phasor
+sc_trig
+scale
+scale2
+scalearray
+scanhammer
+scanmap
+scans
+scansmap
+scantable
+scanu
+scanu2
+schedkwhen
+schedkwhennamed
+schedule
+schedulek
+schedwhen
+scoreline
+scoreline_i
+seed
+sekere
+select
+semitone
+sense
+sensekey
+seqtime
+seqtime2
+sequ
+sequstate
+serialBegin
+serialEnd
+serialFlush
+serialPrint
+serialRead
+serialWrite
+serialWrite_i
+setcol
+setctrl
+setksmps
+setrow
+setscorepos
+sfilist
+sfinstr
+sfinstr3
+sfinstr3m
+sfinstrm
+sfload
+sflooper
+sfpassign
+sfplay
+sfplay3
+sfplay3m
+sfplaym
+sfplist
+sfpreset
+shaker
+shiftin
+shiftout
+signum
+sin
+sinh
+sininv
+sinsyn
+skf
+sleighbells
+slicearray
+slicearray_i
+slider16
+slider16f
+slider16table
+slider16tablef
+slider32
+slider32f
+slider32table
+slider32tablef
+slider64
+slider64f
+slider64table
+slider64tablef
+slider8
+slider8f
+slider8table
+slider8tablef
+sliderKawai
+sndloop
+sndwarp
+sndwarpst
+sockrecv
+sockrecvs
+socksend
+socksends
+sorta
+sortd
+soundin
+space
+spat3d
+spat3di
+spat3dt
+spdist
+spf
+splitrig
+sprintf
+sprintfk
+spsend
+sqrt
+squinewave
+st2ms
+statevar
+sterrain
+stix
+strcat
+strcatk
+strchar
+strchark
+strcmp
+strcmpk
+strcpy
+strcpyk
+strecv
+streson
+strfromurl
+strget
+strindex
+strindexk
+string2array
+strlen
+strlenk
+strlower
+strlowerk
+strrindex
+strrindexk
+strset
+strstrip
+strsub
+strsubk
+strtod
+strtodk
+strtol
+strtolk
+strupper
+strupperk
+stsend
+subinstr
+subinstrinit
+sum
+sumarray
+svfilter
+svn
+syncgrain
+syncloop
+syncphasor
+system
+system_i
+tab
+tab2array
+tab2pvs
+tab_i
+tabifd
+table
+table3
+table3kt
+tablecopy
+tablefilter
+tablefilteri
+tablegpw
+tablei
+tableicopy
+tableigpw
+tableikt
+tableimix
+tablekt
+tablemix
+tableng
+tablera
+tableseg
+tableshuffle
+tableshufflei
+tablew
+tablewa
+tablewkt
+tablexkt
+tablexseg
+tabmorph
+tabmorpha
+tabmorphak
+tabmorphi
+tabplay
+tabrec
+tabsum
+tabw
+tabw_i
+tambourine
+tan
+tanh
+taninv
+taninv2
+tbvcf
+tempest
+tempo
+temposcal
+tempoval
+timedseq
+timeinstk
+timeinsts
+timek
+times
+tival
+tlineto
+tone
+tonek
+tonex
+tradsyn
+trandom
+transeg
+transegb
+transegr
+trcross
+trfilter
+trhighest
+trigExpseg
+trigLinseg
+trigexpseg
+trigger
+trighold
+triglinseg
+trigphasor
+trigseq
+trim
+trim_i
+trirand
+trlowest
+trmix
+trscale
+trshift
+trsplit
+turnoff
+turnoff2
+turnoff2_i
+turnoff3
+turnon
+tvconv
+unirand
+unwrap
+upsamp
+urandom
+urd
+vactrol
+vadd
+vadd_i
+vaddv
+vaddv_i
+vaget
+valpass
+vaset
+vbap
+vbapg
+vbapgmove
+vbaplsinit
+vbapmove
+vbapz
+vbapzmove
+vcella
+vclpf
+vco
+vco2
+vco2ft
+vco2ift
+vco2init
+vcomb
+vcopy
+vcopy_i
+vdel_k
+vdelay
+vdelay3
+vdelayk
+vdelayx
+vdelayxq
+vdelayxs
+vdelayxw
+vdelayxwq
+vdelayxws
+vdivv
+vdivv_i
+vecdelay
+veloc
+vexp
+vexp_i
+vexpseg
+vexpv
+vexpv_i
+vibes
+vibr
+vibrato
+vincr
+vlimit
+vlinseg
+vlowres
+vmap
+vmirror
+vmult
+vmult_i
+vmultv
+vmultv_i
+voice
+vosim
+vphaseseg
+vport
+vpow
+vpow_i
+vpowv
+vpowv_i
+vps
+vpvoc
+vrandh
+vrandi
+vsubv
+vsubv_i
+vtaba
+vtabi
+vtabk
+vtable1k
+vtablea
+vtablei
+vtablek
+vtablewa
+vtablewi
+vtablewk
+vtabwa
+vtabwi
+vtabwk
+vwrap
+waveset
+websocket
+weibull
+wgbow
+wgbowedbar
+wgbrass
+wgclar
+wgflute
+wgpluck
+wgpluck2
+wguide1
+wguide2
+wiiconnect
+wiidata
+wiirange
+wiisend
+window
+wrap
+writescratch
+wterrain
+wterrain2
+xadsr
+xin
+xout
+xtratim
+xyscale
+zacl
+zakinit
+zamod
+zar
+zarg
+zaw
+zawm
+zdf_1pole
+zdf_1pole_mode
+zdf_2pole
+zdf_2pole_mode
+zdf_ladder
+zfilter2
+zir
+ziw
+ziwm
+zkcl
+zkmod
+zkr
+zkw
+zkwm
+'''.split())
+
# Opcode names reported as deprecated by Csound (see the generation
# recipe in the comment above OPCODES).
DEPRECATED_OPCODES = {
    'array', 'bformdec', 'bformenc', 'copy2ftab', 'copy2ttab', 'hrtfer',
    'ktableseg', 'lentab', 'maxtab', 'mintab', 'pop', 'pop_f', 'ptable',
    'ptable3', 'ptablei', 'ptableiw', 'push', 'push_f', 'scalet', 'sndload',
    'soundout', 'soundouts', 'specaddm', 'specdiff', 'specdisp', 'specfilt',
    'spechist', 'specptrk', 'specscal', 'specsum', 'spectrum', 'stack',
    'sumtab', 'tabgen', 'tableiw', 'tabmap', 'tabmap_i', 'tabslice',
    'tb0', 'tb0_init', 'tb1', 'tb10', 'tb10_init', 'tb11', 'tb11_init',
    'tb12', 'tb12_init', 'tb13', 'tb13_init', 'tb14', 'tb14_init',
    'tb15', 'tb15_init', 'tb1_init', 'tb2', 'tb2_init', 'tb3', 'tb3_init',
    'tb4', 'tb4_init', 'tb5', 'tb5_init', 'tb6', 'tb6_init', 'tb7',
    'tb7_init', 'tb8', 'tb8_init', 'tb9', 'tb9_init', 'vbap16', 'vbap4',
    'vbap4move', 'vbap8', 'vbap8move', 'xscanmap', 'xscans', 'xscansmap',
    'xscanu', 'xyin',
}
diff --git a/pygments/lexers/_css_builtins.py b/pygments/lexers/_css_builtins.py
new file mode 100644
index 0000000..785a6f3
--- /dev/null
+++ b/pygments/lexers/_css_builtins.py
@@ -0,0 +1,558 @@
+"""
+ pygments.lexers._css_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file is autogenerated by scripts/get_css_properties.py
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+_css_properties = (
+ '-webkit-line-clamp',
+ 'accent-color',
+ 'align-content',
+ 'align-items',
+ 'align-self',
+ 'alignment-baseline',
+ 'all',
+ 'animation',
+ 'animation-delay',
+ 'animation-direction',
+ 'animation-duration',
+ 'animation-fill-mode',
+ 'animation-iteration-count',
+ 'animation-name',
+ 'animation-play-state',
+ 'animation-timing-function',
+ 'appearance',
+ 'aspect-ratio',
+ 'azimuth',
+ 'backface-visibility',
+ 'background',
+ 'background-attachment',
+ 'background-blend-mode',
+ 'background-clip',
+ 'background-color',
+ 'background-image',
+ 'background-origin',
+ 'background-position',
+ 'background-repeat',
+ 'background-size',
+ 'baseline-shift',
+ 'baseline-source',
+ 'block-ellipsis',
+ 'block-size',
+ 'block-step',
+ 'block-step-align',
+ 'block-step-insert',
+ 'block-step-round',
+ 'block-step-size',
+ 'bookmark-label',
+ 'bookmark-level',
+ 'bookmark-state',
+ 'border',
+ 'border-block',
+ 'border-block-color',
+ 'border-block-end',
+ 'border-block-end-color',
+ 'border-block-end-style',
+ 'border-block-end-width',
+ 'border-block-start',
+ 'border-block-start-color',
+ 'border-block-start-style',
+ 'border-block-start-width',
+ 'border-block-style',
+ 'border-block-width',
+ 'border-bottom',
+ 'border-bottom-color',
+ 'border-bottom-left-radius',
+ 'border-bottom-right-radius',
+ 'border-bottom-style',
+ 'border-bottom-width',
+ 'border-boundary',
+ 'border-collapse',
+ 'border-color',
+ 'border-end-end-radius',
+ 'border-end-start-radius',
+ 'border-image',
+ 'border-image-outset',
+ 'border-image-repeat',
+ 'border-image-slice',
+ 'border-image-source',
+ 'border-image-width',
+ 'border-inline',
+ 'border-inline-color',
+ 'border-inline-end',
+ 'border-inline-end-color',
+ 'border-inline-end-style',
+ 'border-inline-end-width',
+ 'border-inline-start',
+ 'border-inline-start-color',
+ 'border-inline-start-style',
+ 'border-inline-start-width',
+ 'border-inline-style',
+ 'border-inline-width',
+ 'border-left',
+ 'border-left-color',
+ 'border-left-style',
+ 'border-left-width',
+ 'border-radius',
+ 'border-right',
+ 'border-right-color',
+ 'border-right-style',
+ 'border-right-width',
+ 'border-spacing',
+ 'border-start-end-radius',
+ 'border-start-start-radius',
+ 'border-style',
+ 'border-top',
+ 'border-top-color',
+ 'border-top-left-radius',
+ 'border-top-right-radius',
+ 'border-top-style',
+ 'border-top-width',
+ 'border-width',
+ 'bottom',
+ 'box-decoration-break',
+ 'box-shadow',
+ 'box-sizing',
+ 'box-snap',
+ 'break-after',
+ 'break-before',
+ 'break-inside',
+ 'caption-side',
+ 'caret',
+ 'caret-color',
+ 'caret-shape',
+ 'chains',
+ 'clear',
+ 'clip',
+ 'clip-path',
+ 'clip-rule',
+ 'color',
+ 'color-adjust',
+ 'color-interpolation-filters',
+ 'color-scheme',
+ 'column-count',
+ 'column-fill',
+ 'column-gap',
+ 'column-rule',
+ 'column-rule-color',
+ 'column-rule-style',
+ 'column-rule-width',
+ 'column-span',
+ 'column-width',
+ 'columns',
+ 'contain',
+ 'contain-intrinsic-block-size',
+ 'contain-intrinsic-height',
+ 'contain-intrinsic-inline-size',
+ 'contain-intrinsic-size',
+ 'contain-intrinsic-width',
+ 'container',
+ 'container-name',
+ 'container-type',
+ 'content',
+ 'content-visibility',
+ 'continue',
+ 'counter-increment',
+ 'counter-reset',
+ 'counter-set',
+ 'cue',
+ 'cue-after',
+ 'cue-before',
+ 'cursor',
+ 'direction',
+ 'display',
+ 'dominant-baseline',
+ 'elevation',
+ 'empty-cells',
+ 'fill',
+ 'fill-break',
+ 'fill-color',
+ 'fill-image',
+ 'fill-opacity',
+ 'fill-origin',
+ 'fill-position',
+ 'fill-repeat',
+ 'fill-rule',
+ 'fill-size',
+ 'filter',
+ 'flex',
+ 'flex-basis',
+ 'flex-direction',
+ 'flex-flow',
+ 'flex-grow',
+ 'flex-shrink',
+ 'flex-wrap',
+ 'float',
+ 'float-defer',
+ 'float-offset',
+ 'float-reference',
+ 'flood-color',
+ 'flood-opacity',
+ 'flow',
+ 'flow-from',
+ 'flow-into',
+ 'font',
+ 'font-family',
+ 'font-feature-settings',
+ 'font-kerning',
+ 'font-language-override',
+ 'font-optical-sizing',
+ 'font-palette',
+ 'font-size',
+ 'font-size-adjust',
+ 'font-stretch',
+ 'font-style',
+ 'font-synthesis',
+ 'font-synthesis-small-caps',
+ 'font-synthesis-style',
+ 'font-synthesis-weight',
+ 'font-variant',
+ 'font-variant-alternates',
+ 'font-variant-caps',
+ 'font-variant-east-asian',
+ 'font-variant-emoji',
+ 'font-variant-ligatures',
+ 'font-variant-numeric',
+ 'font-variant-position',
+ 'font-variation-settings',
+ 'font-weight',
+ 'footnote-display',
+ 'footnote-policy',
+ 'forced-color-adjust',
+ 'gap',
+ 'glyph-orientation-vertical',
+ 'grid',
+ 'grid-area',
+ 'grid-auto-columns',
+ 'grid-auto-flow',
+ 'grid-auto-rows',
+ 'grid-column',
+ 'grid-column-end',
+ 'grid-column-start',
+ 'grid-row',
+ 'grid-row-end',
+ 'grid-row-start',
+ 'grid-template',
+ 'grid-template-areas',
+ 'grid-template-columns',
+ 'grid-template-rows',
+ 'hanging-punctuation',
+ 'height',
+ 'hyphenate-character',
+ 'hyphenate-limit-chars',
+ 'hyphenate-limit-last',
+ 'hyphenate-limit-lines',
+ 'hyphenate-limit-zone',
+ 'hyphens',
+ 'image-orientation',
+ 'image-rendering',
+ 'image-resolution',
+ 'initial-letter',
+ 'initial-letter-align',
+ 'initial-letter-wrap',
+ 'inline-size',
+ 'inline-sizing',
+ 'input-security',
+ 'inset',
+ 'inset-block',
+ 'inset-block-end',
+ 'inset-block-start',
+ 'inset-inline',
+ 'inset-inline-end',
+ 'inset-inline-start',
+ 'isolation',
+ 'justify-content',
+ 'justify-items',
+ 'justify-self',
+ 'leading-trim',
+ 'left',
+ 'letter-spacing',
+ 'lighting-color',
+ 'line-break',
+ 'line-clamp',
+ 'line-grid',
+ 'line-height',
+ 'line-height-step',
+ 'line-padding',
+ 'line-snap',
+ 'list-style',
+ 'list-style-image',
+ 'list-style-position',
+ 'list-style-type',
+ 'margin',
+ 'margin-block',
+ 'margin-block-end',
+ 'margin-block-start',
+ 'margin-bottom',
+ 'margin-break',
+ 'margin-inline',
+ 'margin-inline-end',
+ 'margin-inline-start',
+ 'margin-left',
+ 'margin-right',
+ 'margin-top',
+ 'margin-trim',
+ 'marker',
+ 'marker-end',
+ 'marker-knockout-left',
+ 'marker-knockout-right',
+ 'marker-mid',
+ 'marker-pattern',
+ 'marker-segment',
+ 'marker-side',
+ 'marker-start',
+ 'mask',
+ 'mask-border',
+ 'mask-border-mode',
+ 'mask-border-outset',
+ 'mask-border-repeat',
+ 'mask-border-slice',
+ 'mask-border-source',
+ 'mask-border-width',
+ 'mask-clip',
+ 'mask-composite',
+ 'mask-image',
+ 'mask-mode',
+ 'mask-origin',
+ 'mask-position',
+ 'mask-repeat',
+ 'mask-size',
+ 'mask-type',
+ 'max-block-size',
+ 'max-height',
+ 'max-inline-size',
+ 'max-lines',
+ 'max-width',
+ 'min-block-size',
+ 'min-height',
+ 'min-inline-size',
+ 'min-intrinsic-sizing',
+ 'min-width',
+ 'mix-blend-mode',
+ 'nav-down',
+ 'nav-left',
+ 'nav-right',
+ 'nav-up',
+ 'object-fit',
+ 'object-overflow',
+ 'object-position',
+ 'object-view-box',
+ 'offset',
+ 'offset-anchor',
+ 'offset-distance',
+ 'offset-path',
+ 'offset-position',
+ 'offset-rotate',
+ 'opacity',
+ 'order',
+ 'orphans',
+ 'outline',
+ 'outline-color',
+ 'outline-offset',
+ 'outline-style',
+ 'outline-width',
+ 'overflow',
+ 'overflow-anchor',
+ 'overflow-block',
+ 'overflow-clip-margin',
+ 'overflow-inline',
+ 'overflow-wrap',
+ 'overflow-x',
+ 'overflow-y',
+ 'overscroll-behavior',
+ 'overscroll-behavior-block',
+ 'overscroll-behavior-inline',
+ 'overscroll-behavior-x',
+ 'overscroll-behavior-y',
+ 'padding',
+ 'padding-block',
+ 'padding-block-end',
+ 'padding-block-start',
+ 'padding-bottom',
+ 'padding-inline',
+ 'padding-inline-end',
+ 'padding-inline-start',
+ 'padding-left',
+ 'padding-right',
+ 'padding-top',
+ 'page',
+ 'page-break-after',
+ 'page-break-before',
+ 'page-break-inside',
+ 'pause',
+ 'pause-after',
+ 'pause-before',
+ 'perspective',
+ 'perspective-origin',
+ 'pitch',
+ 'pitch-range',
+ 'place-content',
+ 'place-items',
+ 'place-self',
+ 'play-during',
+ 'pointer-events',
+ 'position',
+ 'print-color-adjust',
+ 'property-name',
+ 'quotes',
+ 'region-fragment',
+ 'resize',
+ 'rest',
+ 'rest-after',
+ 'rest-before',
+ 'richness',
+ 'right',
+ 'rotate',
+ 'row-gap',
+ 'ruby-align',
+ 'ruby-merge',
+ 'ruby-overhang',
+ 'ruby-position',
+ 'running',
+ 'scale',
+ 'scroll-behavior',
+ 'scroll-margin',
+ 'scroll-margin-block',
+ 'scroll-margin-block-end',
+ 'scroll-margin-block-start',
+ 'scroll-margin-bottom',
+ 'scroll-margin-inline',
+ 'scroll-margin-inline-end',
+ 'scroll-margin-inline-start',
+ 'scroll-margin-left',
+ 'scroll-margin-right',
+ 'scroll-margin-top',
+ 'scroll-padding',
+ 'scroll-padding-block',
+ 'scroll-padding-block-end',
+ 'scroll-padding-block-start',
+ 'scroll-padding-bottom',
+ 'scroll-padding-inline',
+ 'scroll-padding-inline-end',
+ 'scroll-padding-inline-start',
+ 'scroll-padding-left',
+ 'scroll-padding-right',
+ 'scroll-padding-top',
+ 'scroll-snap-align',
+ 'scroll-snap-stop',
+ 'scroll-snap-type',
+ 'scrollbar-color',
+ 'scrollbar-gutter',
+ 'scrollbar-width',
+ 'shape-image-threshold',
+ 'shape-inside',
+ 'shape-margin',
+ 'shape-outside',
+ 'spatial-navigation-action',
+ 'spatial-navigation-contain',
+ 'spatial-navigation-function',
+ 'speak',
+ 'speak-as',
+ 'speak-header',
+ 'speak-numeral',
+ 'speak-punctuation',
+ 'speech-rate',
+ 'stress',
+ 'string-set',
+ 'stroke',
+ 'stroke-align',
+ 'stroke-alignment',
+ 'stroke-break',
+ 'stroke-color',
+ 'stroke-dash-corner',
+ 'stroke-dash-justify',
+ 'stroke-dashadjust',
+ 'stroke-dasharray',
+ 'stroke-dashcorner',
+ 'stroke-dashoffset',
+ 'stroke-image',
+ 'stroke-linecap',
+ 'stroke-linejoin',
+ 'stroke-miterlimit',
+ 'stroke-opacity',
+ 'stroke-origin',
+ 'stroke-position',
+ 'stroke-repeat',
+ 'stroke-size',
+ 'stroke-width',
+ 'tab-size',
+ 'table-layout',
+ 'text-align',
+ 'text-align-all',
+ 'text-align-last',
+ 'text-combine-upright',
+ 'text-decoration',
+ 'text-decoration-color',
+ 'text-decoration-line',
+ 'text-decoration-skip',
+ 'text-decoration-skip-box',
+ 'text-decoration-skip-ink',
+ 'text-decoration-skip-inset',
+ 'text-decoration-skip-self',
+ 'text-decoration-skip-spaces',
+ 'text-decoration-style',
+ 'text-decoration-thickness',
+ 'text-edge',
+ 'text-emphasis',
+ 'text-emphasis-color',
+ 'text-emphasis-position',
+ 'text-emphasis-skip',
+ 'text-emphasis-style',
+ 'text-group-align',
+ 'text-indent',
+ 'text-justify',
+ 'text-orientation',
+ 'text-overflow',
+ 'text-shadow',
+ 'text-space-collapse',
+ 'text-space-trim',
+ 'text-spacing',
+ 'text-transform',
+ 'text-underline-offset',
+ 'text-underline-position',
+ 'text-wrap',
+ 'top',
+ 'transform',
+ 'transform-box',
+ 'transform-origin',
+ 'transform-style',
+ 'transition',
+ 'transition-delay',
+ 'transition-duration',
+ 'transition-property',
+ 'transition-timing-function',
+ 'translate',
+ 'unicode-bidi',
+ 'user-select',
+ 'vertical-align',
+ 'visibility',
+ 'voice-balance',
+ 'voice-duration',
+ 'voice-family',
+ 'voice-pitch',
+ 'voice-range',
+ 'voice-rate',
+ 'voice-stress',
+ 'voice-volume',
+ 'volume',
+ 'white-space',
+ 'widows',
+ 'width',
+ 'will-change',
+ 'word-boundary-detection',
+ 'word-boundary-expansion',
+ 'word-break',
+ 'word-spacing',
+ 'word-wrap',
+ 'wrap-after',
+ 'wrap-before',
+ 'wrap-flow',
+ 'wrap-inside',
+ 'wrap-through',
+ 'writing-mode',
+ 'z-index',
+) \ No newline at end of file
diff --git a/pygments/lexers/_julia_builtins.py b/pygments/lexers/_julia_builtins.py
new file mode 100644
index 0000000..533547a
--- /dev/null
+++ b/pygments/lexers/_julia_builtins.py
@@ -0,0 +1,411 @@
+"""
+ pygments.lexers._julia_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Julia builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# operators
+# see https://github.com/JuliaLang/julia/blob/master/src/julia-parser.scm
+# Julia v1.6.0-rc1
+OPERATORS_LIST = [
+ # other
+ '->',
+ # prec-assignment
+ ':=', '$=',
+ # prec-conditional, prec-lazy-or, prec-lazy-and
+ '?', '||', '&&',
+ # prec-colon
+ ':',
+ # prec-plus
+ '$',
+ # prec-decl
+ '::',
+]
+DOTTED_OPERATORS_LIST = [
+ # prec-assignment
+ r'=', r'+=', r'-=', r'*=', r'/=', r'//=', r'\=', r'^=', r'÷=', r'%=', r'<<=',
+ r'>>=', r'>>>=', r'|=', r'&=', r'⊻=', r'≔', r'⩴', r"≕'", r'~',
+ # prec-pair
+ '=>',
+ # prec-arrow
+ r'→', r'↔', r'↚', r'↛', r'↞', r'↠', r'↢', r'↣', r'↦', r'↤', r'↮', r'⇎', r'⇍', r'⇏',
+ r'⇐', r'⇒', r'⇔', r'⇴', r'⇶', r'⇷', r'⇸', r'⇹', r'⇺', r'⇻', r'⇼', r'⇽', r'⇾', r'⇿',
+ r'⟵', r'⟶', r'⟷', r'⟹', r'⟺', r'⟻', r'⟼', r'⟽', r'⟾', r'⟿', r'⤀', r'⤁', r'⤂', r'⤃',
+ r'⤄', r'⤅', r'⤆', r'⤇', r'⤌', r'⤍', r'⤎', r'⤏', r'⤐', r'⤑', r'⤔', r'⤕', r'⤖', r'⤗',
+ r'⤘', r'⤝', r'⤞', r'⤟', r'⤠', r'⥄', r'⥅', r'⥆', r'⥇', r'⥈', r'⥊', r'⥋', r'⥎', r'⥐',
+ r'⥒', r'⥓', r'⥖', r'⥗', r'⥚', r'⥛', r'⥞', r'⥟', r'⥢', r'⥤', r'⥦', r'⥧', r'⥨', r'⥩',
+ r'⥪', r'⥫', r'⥬', r'⥭', r'⥰', r'⧴', r'⬱', r'⬰', r'⬲', r'⬳', r'⬴', r'⬵', r'⬶', r'⬷',
+ r'⬸', r'⬹', r'⬺', r'⬻', r'⬼', r'⬽', r'⬾', r'⬿', r'⭀', r'⭁', r'⭂', r'⭃', r'⭄', r'⭇',
+ r'⭈', r'⭉', r'⭊', r'⭋', r'⭌', r'←', r'→', r'⇜', r'⇝', r'↜', r'↝', r'↩', r'↪', r'↫',
+ r'↬', r'↼', r'↽', r'⇀', r'⇁', r'⇄', r'⇆', r'⇇', r'⇉', r'⇋', r'⇌', r'⇚', r'⇛', r'⇠',
+ r'⇢', r'↷', r'↶', r'↺', r'↻', r'-->', r'<--', r'<-->',
+ # prec-comparison
+ r'>', r'<', r'>=', r'≥', r'<=', r'≤', r'==', r'===', r'≡', r'!=', r'≠', r'!==',
+ r'≢', r'∈', r'∉', r'∋', r'∌', r'⊆', r'⊈', r'⊂', r'⊄', r'⊊', r'∝', r'∊', r'∍', r'∥',
+ r'∦', r'∷', r'∺', r'∻', r'∽', r'∾', r'≁', r'≃', r'≂', r'≄', r'≅', r'≆', r'≇', r'≈',
+ r'≉', r'≊', r'≋', r'≌', r'≍', r'≎', r'≐', r'≑', r'≒', r'≓', r'≖', r'≗', r'≘', r'≙',
+ r'≚', r'≛', r'≜', r'≝', r'≞', r'≟', r'≣', r'≦', r'≧', r'≨', r'≩', r'≪', r'≫', r'≬',
+ r'≭', r'≮', r'≯', r'≰', r'≱', r'≲', r'≳', r'≴', r'≵', r'≶', r'≷', r'≸', r'≹', r'≺',
+ r'≻', r'≼', r'≽', r'≾', r'≿', r'⊀', r'⊁', r'⊃', r'⊅', r'⊇', r'⊉', r'⊋', r'⊏', r'⊐',
+ r'⊑', r'⊒', r'⊜', r'⊩', r'⊬', r'⊮', r'⊰', r'⊱', r'⊲', r'⊳', r'⊴', r'⊵', r'⊶', r'⊷',
+ r'⋍', r'⋐', r'⋑', r'⋕', r'⋖', r'⋗', r'⋘', r'⋙', r'⋚', r'⋛', r'⋜', r'⋝', r'⋞', r'⋟',
+ r'⋠', r'⋡', r'⋢', r'⋣', r'⋤', r'⋥', r'⋦', r'⋧', r'⋨', r'⋩', r'⋪', r'⋫', r'⋬', r'⋭',
+ r'⋲', r'⋳', r'⋴', r'⋵', r'⋶', r'⋷', r'⋸', r'⋹', r'⋺', r'⋻', r'⋼', r'⋽', r'⋾', r'⋿',
+ r'⟈', r'⟉', r'⟒', r'⦷', r'⧀', r'⧁', r'⧡', r'⧣', r'⧤', r'⧥', r'⩦', r'⩧', r'⩪', r'⩫',
+ r'⩬', r'⩭', r'⩮', r'⩯', r'⩰', r'⩱', r'⩲', r'⩳', r'⩵', r'⩶', r'⩷', r'⩸', r'⩹', r'⩺',
+ r'⩻', r'⩼', r'⩽', r'⩾', r'⩿', r'⪀', r'⪁', r'⪂', r'⪃', r'⪄', r'⪅', r'⪆', r'⪇', r'⪈',
+ r'⪉', r'⪊', r'⪋', r'⪌', r'⪍', r'⪎', r'⪏', r'⪐', r'⪑', r'⪒', r'⪓', r'⪔', r'⪕', r'⪖',
+ r'⪗', r'⪘', r'⪙', r'⪚', r'⪛', r'⪜', r'⪝', r'⪞', r'⪟', r'⪠', r'⪡', r'⪢', r'⪣', r'⪤',
+ r'⪥', r'⪦', r'⪧', r'⪨', r'⪩', r'⪪', r'⪫', r'⪬', r'⪭', r'⪮', r'⪯', r'⪰', r'⪱', r'⪲',
+ r'⪳', r'⪴', r'⪵', r'⪶', r'⪷', r'⪸', r'⪹', r'⪺', r'⪻', r'⪼', r'⪽', r'⪾', r'⪿', r'⫀',
+ r'⫁', r'⫂', r'⫃', r'⫄', r'⫅', r'⫆', r'⫇', r'⫈', r'⫉', r'⫊', r'⫋', r'⫌', r'⫍', r'⫎',
+ r'⫏', r'⫐', r'⫑', r'⫒', r'⫓', r'⫔', r'⫕', r'⫖', r'⫗', r'⫘', r'⫙', r'⫷', r'⫸', r'⫹',
+ r'⫺', r'⊢', r'⊣', r'⟂', r'<:', r'>:',
+ # prec-pipe
+ '<|', '|>',
+ # prec-colon
+ r'…', r'⁝', r'⋮', r'⋱', r'⋰', r'⋯',
+ # prec-plus
+ r'+', r'-', r'¦', r'|', r'⊕', r'⊖', r'⊞', r'⊟', r'++', r'∪', r'∨', r'⊔', r'±', r'∓',
+ r'∔', r'∸', r'≏', r'⊎', r'⊻', r'⊽', r'⋎', r'⋓', r'⧺', r'⧻', r'⨈', r'⨢', r'⨣', r'⨤',
+ r'⨥', r'⨦', r'⨧', r'⨨', r'⨩', r'⨪', r'⨫', r'⨬', r'⨭', r'⨮', r'⨹', r'⨺', r'⩁', r'⩂',
+ r'⩅', r'⩊', r'⩌', r'⩏', r'⩐', r'⩒', r'⩔', r'⩖', r'⩗', r'⩛', r'⩝', r'⩡', r'⩢', r'⩣',
+ # prec-times
+ r'*', r'/', r'⌿', r'÷', r'%', r'&', r'⋅', r'∘', r'×', '\\', r'∩', r'∧', r'⊗', r'⊘',
+ r'⊙', r'⊚', r'⊛', r'⊠', r'⊡', r'⊓', r'∗', r'∙', r'∤', r'⅋', r'≀', r'⊼', r'⋄', r'⋆',
+ r'⋇', r'⋉', r'⋊', r'⋋', r'⋌', r'⋏', r'⋒', r'⟑', r'⦸', r'⦼', r'⦾', r'⦿', r'⧶', r'⧷',
+ r'⨇', r'⨰', r'⨱', r'⨲', r'⨳', r'⨴', r'⨵', r'⨶', r'⨷', r'⨸', r'⨻', r'⨼', r'⨽', r'⩀',
+ r'⩃', r'⩄', r'⩋', r'⩍', r'⩎', r'⩑', r'⩓', r'⩕', r'⩘', r'⩚', r'⩜', r'⩞', r'⩟', r'⩠',
+ r'⫛', r'⊍', r'▷', r'⨝', r'⟕', r'⟖', r'⟗', r'⨟',
+ # prec-rational, prec-bitshift
+ '//', '>>', '<<', '>>>',
+ # prec-power
+ r'^', r'↑', r'↓', r'⇵', r'⟰', r'⟱', r'⤈', r'⤉', r'⤊', r'⤋', r'⤒', r'⤓', r'⥉', r'⥌',
+ r'⥍', r'⥏', r'⥑', r'⥔', r'⥕', r'⥘', r'⥙', r'⥜', r'⥝', r'⥠', r'⥡', r'⥣', r'⥥', r'⥮',
+ r'⥯', r'↑', r'↓',
+ # unary-ops, excluding unary-and-binary-ops
+ '!', r'¬', r'√', r'∛', r'∜'
+]
+
+# Generated with the following in Julia v1.6.0-rc1
+'''
+#!/usr/bin/env julia
+
+import REPL.REPLCompletions
+res = String["in", "isa", "where"]
+for kw in collect(x.keyword for x in REPLCompletions.complete_keyword(""))
+ if !(contains(kw, " ") || kw == "struct")
+ push!(res, kw)
+ end
+end
+sort!(unique!(setdiff!(res, ["true", "false"])))
+foreach(x -> println("\'", x, "\',"), res)
+'''
+KEYWORD_LIST = (
+ 'baremodule',
+ 'begin',
+ 'break',
+ 'catch',
+ 'ccall',
+ 'const',
+ 'continue',
+ 'do',
+ 'else',
+ 'elseif',
+ 'end',
+ 'export',
+ 'finally',
+ 'for',
+ 'function',
+ 'global',
+ 'if',
+ 'import',
+ 'in',
+ 'isa',
+ 'let',
+ 'local',
+ 'macro',
+ 'module',
+ 'quote',
+ 'return',
+ 'try',
+ 'using',
+ 'where',
+ 'while',
+)
+
+# Generated with the following in Julia v1.6.0-rc1
+'''
+#!/usr/bin/env julia
+
+import REPL.REPLCompletions
+res = String[]
+for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core),
+ REPLCompletions.completions("", 0)[1])
+ try
+ v = eval(Symbol(compl.mod))
+ if (v isa Type || v isa TypeVar) && (compl.mod != "=>")
+ push!(res, compl.mod)
+ end
+ catch e
+ end
+end
+sort!(unique!(res))
+foreach(x -> println("\'", x, "\',"), res)
+'''
+BUILTIN_LIST = (
+ 'AbstractArray',
+ 'AbstractChannel',
+ 'AbstractChar',
+ 'AbstractDict',
+ 'AbstractDisplay',
+ 'AbstractFloat',
+ 'AbstractIrrational',
+ 'AbstractMatch',
+ 'AbstractMatrix',
+ 'AbstractPattern',
+ 'AbstractRange',
+ 'AbstractSet',
+ 'AbstractString',
+ 'AbstractUnitRange',
+ 'AbstractVecOrMat',
+ 'AbstractVector',
+ 'Any',
+ 'ArgumentError',
+ 'Array',
+ 'AssertionError',
+ 'BigFloat',
+ 'BigInt',
+ 'BitArray',
+ 'BitMatrix',
+ 'BitSet',
+ 'BitVector',
+ 'Bool',
+ 'BoundsError',
+ 'CapturedException',
+ 'CartesianIndex',
+ 'CartesianIndices',
+ 'Cchar',
+ 'Cdouble',
+ 'Cfloat',
+ 'Channel',
+ 'Char',
+ 'Cint',
+ 'Cintmax_t',
+ 'Clong',
+ 'Clonglong',
+ 'Cmd',
+ 'Colon',
+ 'Complex',
+ 'ComplexF16',
+ 'ComplexF32',
+ 'ComplexF64',
+ 'ComposedFunction',
+ 'CompositeException',
+ 'Condition',
+ 'Cptrdiff_t',
+ 'Cshort',
+ 'Csize_t',
+ 'Cssize_t',
+ 'Cstring',
+ 'Cuchar',
+ 'Cuint',
+ 'Cuintmax_t',
+ 'Culong',
+ 'Culonglong',
+ 'Cushort',
+ 'Cvoid',
+ 'Cwchar_t',
+ 'Cwstring',
+ 'DataType',
+ 'DenseArray',
+ 'DenseMatrix',
+ 'DenseVecOrMat',
+ 'DenseVector',
+ 'Dict',
+ 'DimensionMismatch',
+ 'Dims',
+ 'DivideError',
+ 'DomainError',
+ 'EOFError',
+ 'Enum',
+ 'ErrorException',
+ 'Exception',
+ 'ExponentialBackOff',
+ 'Expr',
+ 'Float16',
+ 'Float32',
+ 'Float64',
+ 'Function',
+ 'GlobalRef',
+ 'HTML',
+ 'IO',
+ 'IOBuffer',
+ 'IOContext',
+ 'IOStream',
+ 'IdDict',
+ 'IndexCartesian',
+ 'IndexLinear',
+ 'IndexStyle',
+ 'InexactError',
+ 'InitError',
+ 'Int',
+ 'Int128',
+ 'Int16',
+ 'Int32',
+ 'Int64',
+ 'Int8',
+ 'Integer',
+ 'InterruptException',
+ 'InvalidStateException',
+ 'Irrational',
+ 'KeyError',
+ 'LinRange',
+ 'LineNumberNode',
+ 'LinearIndices',
+ 'LoadError',
+ 'MIME',
+ 'Matrix',
+ 'Method',
+ 'MethodError',
+ 'Missing',
+ 'MissingException',
+ 'Module',
+ 'NTuple',
+ 'NamedTuple',
+ 'Nothing',
+ 'Number',
+ 'OrdinalRange',
+ 'OutOfMemoryError',
+ 'OverflowError',
+ 'Pair',
+ 'PartialQuickSort',
+ 'PermutedDimsArray',
+ 'Pipe',
+ 'ProcessFailedException',
+ 'Ptr',
+ 'QuoteNode',
+ 'Rational',
+ 'RawFD',
+ 'ReadOnlyMemoryError',
+ 'Real',
+ 'ReentrantLock',
+ 'Ref',
+ 'Regex',
+ 'RegexMatch',
+ 'RoundingMode',
+ 'SegmentationFault',
+ 'Set',
+ 'Signed',
+ 'Some',
+ 'StackOverflowError',
+ 'StepRange',
+ 'StepRangeLen',
+ 'StridedArray',
+ 'StridedMatrix',
+ 'StridedVecOrMat',
+ 'StridedVector',
+ 'String',
+ 'StringIndexError',
+ 'SubArray',
+ 'SubString',
+ 'SubstitutionString',
+ 'Symbol',
+ 'SystemError',
+ 'Task',
+ 'TaskFailedException',
+ 'Text',
+ 'TextDisplay',
+ 'Timer',
+ 'Tuple',
+ 'Type',
+ 'TypeError',
+ 'TypeVar',
+ 'UInt',
+ 'UInt128',
+ 'UInt16',
+ 'UInt32',
+ 'UInt64',
+ 'UInt8',
+ 'UndefInitializer',
+ 'UndefKeywordError',
+ 'UndefRefError',
+ 'UndefVarError',
+ 'Union',
+ 'UnionAll',
+ 'UnitRange',
+ 'Unsigned',
+ 'Val',
+ 'Vararg',
+ 'VecElement',
+ 'VecOrMat',
+ 'Vector',
+ 'VersionNumber',
+ 'WeakKeyDict',
+ 'WeakRef',
+)
+
+# Generated with the following in Julia v1.6.0-rc1
+'''
+#!/usr/bin/env julia
+
+import REPL.REPLCompletions
+res = String["true", "false"]
+for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core),
+ REPLCompletions.completions("", 0)[1])
+ try
+ v = eval(Symbol(compl.mod))
+ if !(v isa Function || v isa Type || v isa TypeVar || v isa Module || v isa Colon)
+ push!(res, compl.mod)
+ end
+ catch e
+ end
+end
+sort!(unique!(res))
+foreach(x -> println("\'", x, "\',"), res)
+'''
+LITERAL_LIST = (
+ 'ARGS',
+ 'C_NULL',
+ 'DEPOT_PATH',
+ 'ENDIAN_BOM',
+ 'ENV',
+ 'Inf',
+ 'Inf16',
+ 'Inf32',
+ 'Inf64',
+ 'InsertionSort',
+ 'LOAD_PATH',
+ 'MergeSort',
+ 'NaN',
+ 'NaN16',
+ 'NaN32',
+ 'NaN64',
+ 'PROGRAM_FILE',
+ 'QuickSort',
+ 'RoundDown',
+ 'RoundFromZero',
+ 'RoundNearest',
+ 'RoundNearestTiesAway',
+ 'RoundNearestTiesUp',
+ 'RoundToZero',
+ 'RoundUp',
+ 'VERSION',
+ 'devnull',
+ 'false',
+ 'im',
+ 'missing',
+ 'nothing',
+ 'pi',
+ 'stderr',
+ 'stdin',
+ 'stdout',
+ 'true',
+ 'undef',
+ 'π',
+ 'ℯ',
+)
diff --git a/pygments/lexers/_lasso_builtins.py b/pygments/lexers/_lasso_builtins.py
new file mode 100644
index 0000000..570c310
--- /dev/null
+++ b/pygments/lexers/_lasso_builtins.py
@@ -0,0 +1,5326 @@
+"""
+ pygments.lexers._lasso_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Built-in Lasso types, traits, methods, and members.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+BUILTINS = {
+ 'Types': (
+ 'array',
+ 'atbegin',
+ 'boolean',
+ 'bson_iter',
+ 'bson',
+ 'bytes_document_body',
+ 'bytes',
+ 'cache_server_element',
+ 'cache_server',
+ 'capture',
+ 'client_address',
+ 'client_ip',
+ 'component_container',
+ 'component_render_state',
+ 'component',
+ 'curl',
+ 'curltoken',
+ 'currency',
+ 'custom',
+ 'data_document',
+ 'database_registry',
+ 'date',
+ 'dateandtime',
+ 'dbgp_packet',
+ 'dbgp_server',
+ 'debugging_stack',
+ 'decimal',
+ 'delve',
+ 'dir',
+ 'dirdesc',
+ 'dns_response',
+ 'document_base',
+ 'document_body',
+ 'document_header',
+ 'dsinfo',
+ 'duration',
+ 'eacher',
+ 'email_compose',
+ 'email_parse',
+ 'email_pop',
+ 'email_queue_impl_base',
+ 'email_queue_impl',
+ 'email_smtp',
+ 'email_stage_impl_base',
+ 'email_stage_impl',
+ 'fastcgi_each_fcgi_param',
+ 'fastcgi_server',
+ 'fcgi_record',
+ 'fcgi_request',
+ 'file',
+ 'filedesc',
+ 'filemaker_datasource',
+ 'generateforeachkeyed',
+ 'generateforeachunkeyed',
+ 'generateseries',
+ 'hash_map',
+ 'html_atomic_element',
+ 'html_attr',
+ 'html_base',
+ 'html_binary',
+ 'html_br',
+ 'html_cdata',
+ 'html_container_element',
+ 'html_div',
+ 'html_document_body',
+ 'html_document_head',
+ 'html_eol',
+ 'html_fieldset',
+ 'html_form',
+ 'html_h1',
+ 'html_h2',
+ 'html_h3',
+ 'html_h4',
+ 'html_h5',
+ 'html_h6',
+ 'html_hr',
+ 'html_img',
+ 'html_input',
+ 'html_json',
+ 'html_label',
+ 'html_legend',
+ 'html_link',
+ 'html_meta',
+ 'html_object',
+ 'html_option',
+ 'html_raw',
+ 'html_script',
+ 'html_select',
+ 'html_span',
+ 'html_style',
+ 'html_table',
+ 'html_td',
+ 'html_text',
+ 'html_th',
+ 'html_tr',
+ 'http_document_header',
+ 'http_document',
+ 'http_error',
+ 'http_header_field',
+ 'http_server_connection_handler_globals',
+ 'http_server_connection_handler',
+ 'http_server_request_logger_thread',
+ 'http_server_web_connection',
+ 'http_server',
+ 'image',
+ 'include_cache',
+ 'inline_type',
+ 'integer',
+ 'java_jnienv',
+ 'jbyte',
+ 'jbytearray',
+ 'jchar',
+ 'jchararray',
+ 'jfieldid',
+ 'jfloat',
+ 'jint',
+ 'jmethodid',
+ 'jobject',
+ 'jshort',
+ 'json_decode',
+ 'json_encode',
+ 'json_literal',
+ 'json_object',
+ 'keyword',
+ 'lassoapp_compiledsrc_appsource',
+ 'lassoapp_compiledsrc_fileresource',
+ 'lassoapp_content_rep_halt',
+ 'lassoapp_dirsrc_appsource',
+ 'lassoapp_dirsrc_fileresource',
+ 'lassoapp_installer',
+ 'lassoapp_livesrc_appsource',
+ 'lassoapp_livesrc_fileresource',
+ 'lassoapp_long_expiring_bytes',
+ 'lassoapp_manualsrc_appsource',
+ 'lassoapp_zip_file_server',
+ 'lassoapp_zipsrc_appsource',
+ 'lassoapp_zipsrc_fileresource',
+ 'ldap',
+ 'library_thread_loader',
+ 'list_node',
+ 'list',
+ 'locale',
+ 'log_impl_base',
+ 'log_impl',
+ 'magick_image',
+ 'map_node',
+ 'map',
+ 'memberstream',
+ 'memory_session_driver_impl_entry',
+ 'memory_session_driver_impl',
+ 'memory_session_driver',
+ 'mime_reader',
+ 'mongo_client',
+ 'mongo_collection',
+ 'mongo_cursor',
+ 'mustache_ctx',
+ 'mysql_session_driver_impl',
+ 'mysql_session_driver',
+ 'net_named_pipe',
+ 'net_tcp_ssl',
+ 'net_tcp',
+ 'net_udp_packet',
+ 'net_udp',
+ 'null',
+ 'odbc_session_driver_impl',
+ 'odbc_session_driver',
+ 'opaque',
+ 'os_process',
+ 'pair_compare',
+ 'pair',
+ 'pairup',
+ 'pdf_barcode',
+ 'pdf_chunk',
+ 'pdf_color',
+ 'pdf_doc',
+ 'pdf_font',
+ 'pdf_hyphenator',
+ 'pdf_image',
+ 'pdf_list',
+ 'pdf_paragraph',
+ 'pdf_phrase',
+ 'pdf_read',
+ 'pdf_table',
+ 'pdf_text',
+ 'pdf_typebase',
+ 'percent',
+ 'portal_impl',
+ 'queriable_groupby',
+ 'queriable_grouping',
+ 'queriable_groupjoin',
+ 'queriable_join',
+ 'queriable_orderby',
+ 'queriable_orderbydescending',
+ 'queriable_select',
+ 'queriable_selectmany',
+ 'queriable_skip',
+ 'queriable_take',
+ 'queriable_thenby',
+ 'queriable_thenbydescending',
+ 'queriable_where',
+ 'queue',
+ 'raw_document_body',
+ 'regexp',
+ 'repeat',
+ 'scientific',
+ 'security_registry',
+ 'serialization_element',
+ 'serialization_object_identity_compare',
+ 'serialization_reader',
+ 'serialization_writer_ref',
+ 'serialization_writer_standin',
+ 'serialization_writer',
+ 'session_delete_expired_thread',
+ 'set',
+ 'signature',
+ 'sourcefile',
+ 'sqlite_column',
+ 'sqlite_currentrow',
+ 'sqlite_db',
+ 'sqlite_results',
+ 'sqlite_session_driver_impl_entry',
+ 'sqlite_session_driver_impl',
+ 'sqlite_session_driver',
+ 'sqlite_table',
+ 'sqlite3_stmt',
+ 'sqlite3',
+ 'staticarray',
+ 'string',
+ 'sys_process',
+ 'tag',
+ 'text_document',
+ 'tie',
+ 'timeonly',
+ 'trait',
+ 'tree_base',
+ 'tree_node',
+ 'tree_nullnode',
+ 'ucal',
+ 'usgcpu',
+ 'usgvm',
+ 'void',
+ 'web_error_atend',
+ 'web_node_base',
+ 'web_node_content_representation_css_specialized',
+ 'web_node_content_representation_html_specialized',
+ 'web_node_content_representation_js_specialized',
+ 'web_node_content_representation_xhr_container',
+ 'web_node_echo',
+ 'web_node_root',
+ 'web_request_impl',
+ 'web_request',
+ 'web_response_impl',
+ 'web_response',
+ 'web_router',
+ 'websocket_handler',
+ 'worker_pool',
+ 'xml_attr',
+ 'xml_cdatasection',
+ 'xml_characterdata',
+ 'xml_comment',
+ 'xml_document',
+ 'xml_documentfragment',
+ 'xml_documenttype',
+ 'xml_domimplementation',
+ 'xml_element',
+ 'xml_entity',
+ 'xml_entityreference',
+ 'xml_namednodemap_attr',
+ 'xml_namednodemap_ht',
+ 'xml_namednodemap',
+ 'xml_node',
+ 'xml_nodelist',
+ 'xml_notation',
+ 'xml_processinginstruction',
+ 'xml_text',
+ 'xmlstream',
+ 'zip_file_impl',
+ 'zip_file',
+ 'zip_impl',
+ 'zip',
+ ),
+ 'Traits': (
+ 'any',
+ 'formattingbase',
+ 'html_attributed',
+ 'html_element_coreattrs',
+ 'html_element_eventsattrs',
+ 'html_element_i18nattrs',
+ 'lassoapp_capabilities',
+ 'lassoapp_resource',
+ 'lassoapp_source',
+ 'queriable_asstring',
+ 'session_driver',
+ 'trait_array',
+ 'trait_asstring',
+ 'trait_backcontractible',
+ 'trait_backended',
+ 'trait_backexpandable',
+ 'trait_close',
+ 'trait_contractible',
+ 'trait_decompose_assignment',
+ 'trait_doubleended',
+ 'trait_each_sub',
+ 'trait_encodeurl',
+ 'trait_endedfullymutable',
+ 'trait_expandable',
+ 'trait_file',
+ 'trait_finite',
+ 'trait_finiteforeach',
+ 'trait_foreach',
+ 'trait_foreachtextelement',
+ 'trait_frontcontractible',
+ 'trait_frontended',
+ 'trait_frontexpandable',
+ 'trait_fullymutable',
+ 'trait_generator',
+ 'trait_generatorcentric',
+ 'trait_hashable',
+ 'trait_json_serialize',
+ 'trait_keyed',
+ 'trait_keyedfinite',
+ 'trait_keyedforeach',
+ 'trait_keyedmutable',
+ 'trait_list',
+ 'trait_map',
+ 'trait_net',
+ 'trait_pathcomponents',
+ 'trait_positionallykeyed',
+ 'trait_positionallysearchable',
+ 'trait_queriable',
+ 'trait_queriablelambda',
+ 'trait_readbytes',
+ 'trait_readstring',
+ 'trait_scalar',
+ 'trait_searchable',
+ 'trait_serializable',
+ 'trait_setencoding',
+ 'trait_setoperations',
+ 'trait_stack',
+ 'trait_treenode',
+ 'trait_writebytes',
+ 'trait_writestring',
+ 'trait_xml_elementcompat',
+ 'trait_xml_nodecompat',
+ 'web_connection',
+ 'web_node_container',
+ 'web_node_content_css_specialized',
+ 'web_node_content_document',
+ 'web_node_content_html_specialized',
+ 'web_node_content_js_specialized',
+ 'web_node_content_json_specialized',
+ 'web_node_content_representation',
+ 'web_node_content',
+ 'web_node_postable',
+ 'web_node',
+ ),
+ 'Unbound Methods': (
+ 'abort_clear',
+ 'abort_now',
+ 'abort',
+ 'action_param',
+ 'action_params',
+ 'action_statement',
+ 'admin_authorization',
+ 'admin_currentgroups',
+ 'admin_currentuserid',
+ 'admin_currentusername',
+ 'admin_getpref',
+ 'admin_initialize',
+ 'admin_lassoservicepath',
+ 'admin_removepref',
+ 'admin_setpref',
+ 'admin_userexists',
+ 'all',
+ 'auth_admin',
+ 'auth_check',
+ 'auth_custom',
+ 'auth_group',
+ 'auth_prompt',
+ 'auth_user',
+ 'bom_utf16be',
+ 'bom_utf16le',
+ 'bom_utf32be',
+ 'bom_utf32le',
+ 'bom_utf8',
+ 'bw',
+ 'capture_nearestloopabort',
+ 'capture_nearestloopcontinue',
+ 'capture_nearestloopcount',
+ 'checked',
+ 'cipher_decrypt_private',
+ 'cipher_decrypt_public',
+ 'cipher_decrypt',
+ 'cipher_digest',
+ 'cipher_encrypt_private',
+ 'cipher_encrypt_public',
+ 'cipher_encrypt',
+ 'cipher_generate_key',
+ 'cipher_hmac',
+ 'cipher_keylength',
+ 'cipher_list',
+ 'cipher_open',
+ 'cipher_seal',
+ 'cipher_sign',
+ 'cipher_verify',
+ 'client_addr',
+ 'client_authorization',
+ 'client_browser',
+ 'client_contentlength',
+ 'client_contenttype',
+ 'client_cookielist',
+ 'client_cookies',
+ 'client_encoding',
+ 'client_formmethod',
+ 'client_getargs',
+ 'client_getparam',
+ 'client_getparams',
+ 'client_headers',
+ 'client_integertoip',
+ 'client_iptointeger',
+ 'client_password',
+ 'client_postargs',
+ 'client_postparam',
+ 'client_postparams',
+ 'client_type',
+ 'client_url',
+ 'client_username',
+ 'cn',
+ 'column_name',
+ 'column_names',
+ 'column_type',
+ 'column',
+ 'compress',
+ 'content_addheader',
+ 'content_body',
+ 'content_encoding',
+ 'content_header',
+ 'content_replaceheader',
+ 'content_type',
+ 'cookie_set',
+ 'cookie',
+ 'curl_easy_cleanup',
+ 'curl_easy_duphandle',
+ 'curl_easy_getinfo',
+ 'curl_easy_init',
+ 'curl_easy_reset',
+ 'curl_easy_setopt',
+ 'curl_easy_strerror',
+ 'curl_getdate',
+ 'curl_http_version_1_0',
+ 'curl_http_version_1_1',
+ 'curl_http_version_none',
+ 'curl_ipresolve_v4',
+ 'curl_ipresolve_v6',
+ 'curl_ipresolve_whatever',
+ 'curl_multi_perform',
+ 'curl_multi_result',
+ 'curl_netrc_ignored',
+ 'curl_netrc_optional',
+ 'curl_netrc_required',
+ 'curl_sslversion_default',
+ 'curl_sslversion_sslv2',
+ 'curl_sslversion_sslv3',
+ 'curl_sslversion_tlsv1',
+ 'curl_version_asynchdns',
+ 'curl_version_debug',
+ 'curl_version_gssnegotiate',
+ 'curl_version_idn',
+ 'curl_version_info',
+ 'curl_version_ipv6',
+ 'curl_version_kerberos4',
+ 'curl_version_largefile',
+ 'curl_version_libz',
+ 'curl_version_ntlm',
+ 'curl_version_spnego',
+ 'curl_version_ssl',
+ 'curl_version',
+ 'curlauth_any',
+ 'curlauth_anysafe',
+ 'curlauth_basic',
+ 'curlauth_digest',
+ 'curlauth_gssnegotiate',
+ 'curlauth_none',
+ 'curlauth_ntlm',
+ 'curle_aborted_by_callback',
+ 'curle_bad_calling_order',
+ 'curle_bad_content_encoding',
+ 'curle_bad_download_resume',
+ 'curle_bad_function_argument',
+ 'curle_bad_password_entered',
+ 'curle_couldnt_connect',
+ 'curle_couldnt_resolve_host',
+ 'curle_couldnt_resolve_proxy',
+ 'curle_failed_init',
+ 'curle_file_couldnt_read_file',
+ 'curle_filesize_exceeded',
+ 'curle_ftp_access_denied',
+ 'curle_ftp_cant_get_host',
+ 'curle_ftp_cant_reconnect',
+ 'curle_ftp_couldnt_get_size',
+ 'curle_ftp_couldnt_retr_file',
+ 'curle_ftp_couldnt_set_ascii',
+ 'curle_ftp_couldnt_set_binary',
+ 'curle_ftp_couldnt_use_rest',
+ 'curle_ftp_port_failed',
+ 'curle_ftp_quote_error',
+ 'curle_ftp_ssl_failed',
+ 'curle_ftp_user_password_incorrect',
+ 'curle_ftp_weird_227_format',
+ 'curle_ftp_weird_pass_reply',
+ 'curle_ftp_weird_pasv_reply',
+ 'curle_ftp_weird_server_reply',
+ 'curle_ftp_weird_user_reply',
+ 'curle_ftp_write_error',
+ 'curle_function_not_found',
+ 'curle_got_nothing',
+ 'curle_http_post_error',
+ 'curle_http_range_error',
+ 'curle_http_returned_error',
+ 'curle_interface_failed',
+ 'curle_ldap_cannot_bind',
+ 'curle_ldap_invalid_url',
+ 'curle_ldap_search_failed',
+ 'curle_library_not_found',
+ 'curle_login_denied',
+ 'curle_malformat_user',
+ 'curle_obsolete',
+ 'curle_ok',
+ 'curle_operation_timeouted',
+ 'curle_out_of_memory',
+ 'curle_partial_file',
+ 'curle_read_error',
+ 'curle_recv_error',
+ 'curle_send_error',
+ 'curle_send_fail_rewind',
+ 'curle_share_in_use',
+ 'curle_ssl_cacert',
+ 'curle_ssl_certproblem',
+ 'curle_ssl_cipher',
+ 'curle_ssl_connect_error',
+ 'curle_ssl_engine_initfailed',
+ 'curle_ssl_engine_notfound',
+ 'curle_ssl_engine_setfailed',
+ 'curle_ssl_peer_certificate',
+ 'curle_telnet_option_syntax',
+ 'curle_too_many_redirects',
+ 'curle_unknown_telnet_option',
+ 'curle_unsupported_protocol',
+ 'curle_url_malformat_user',
+ 'curle_url_malformat',
+ 'curle_write_error',
+ 'curlftpauth_default',
+ 'curlftpauth_ssl',
+ 'curlftpauth_tls',
+ 'curlftpssl_all',
+ 'curlftpssl_control',
+ 'curlftpssl_last',
+ 'curlftpssl_none',
+ 'curlftpssl_try',
+ 'curlinfo_connect_time',
+ 'curlinfo_content_length_download',
+ 'curlinfo_content_length_upload',
+ 'curlinfo_content_type',
+ 'curlinfo_effective_url',
+ 'curlinfo_filetime',
+ 'curlinfo_header_size',
+ 'curlinfo_http_connectcode',
+ 'curlinfo_httpauth_avail',
+ 'curlinfo_namelookup_time',
+ 'curlinfo_num_connects',
+ 'curlinfo_os_errno',
+ 'curlinfo_pretransfer_time',
+ 'curlinfo_proxyauth_avail',
+ 'curlinfo_redirect_count',
+ 'curlinfo_redirect_time',
+ 'curlinfo_request_size',
+ 'curlinfo_response_code',
+ 'curlinfo_size_download',
+ 'curlinfo_size_upload',
+ 'curlinfo_speed_download',
+ 'curlinfo_speed_upload',
+ 'curlinfo_ssl_engines',
+ 'curlinfo_ssl_verifyresult',
+ 'curlinfo_starttransfer_time',
+ 'curlinfo_total_time',
+ 'curlmsg_done',
+ 'curlopt_autoreferer',
+ 'curlopt_buffersize',
+ 'curlopt_cainfo',
+ 'curlopt_capath',
+ 'curlopt_connecttimeout',
+ 'curlopt_cookie',
+ 'curlopt_cookiefile',
+ 'curlopt_cookiejar',
+ 'curlopt_cookiesession',
+ 'curlopt_crlf',
+ 'curlopt_customrequest',
+ 'curlopt_dns_use_global_cache',
+ 'curlopt_egdsocket',
+ 'curlopt_encoding',
+ 'curlopt_failonerror',
+ 'curlopt_filetime',
+ 'curlopt_followlocation',
+ 'curlopt_forbid_reuse',
+ 'curlopt_fresh_connect',
+ 'curlopt_ftp_account',
+ 'curlopt_ftp_create_missing_dirs',
+ 'curlopt_ftp_response_timeout',
+ 'curlopt_ftp_ssl',
+ 'curlopt_ftp_use_eprt',
+ 'curlopt_ftp_use_epsv',
+ 'curlopt_ftpappend',
+ 'curlopt_ftplistonly',
+ 'curlopt_ftpport',
+ 'curlopt_ftpsslauth',
+ 'curlopt_header',
+ 'curlopt_http_version',
+ 'curlopt_http200aliases',
+ 'curlopt_httpauth',
+ 'curlopt_httpget',
+ 'curlopt_httpheader',
+ 'curlopt_httppost',
+ 'curlopt_httpproxytunnel',
+ 'curlopt_infilesize_large',
+ 'curlopt_infilesize',
+ 'curlopt_interface',
+ 'curlopt_ipresolve',
+ 'curlopt_krb4level',
+ 'curlopt_low_speed_limit',
+ 'curlopt_low_speed_time',
+ 'curlopt_mail_from',
+ 'curlopt_mail_rcpt',
+ 'curlopt_maxconnects',
+ 'curlopt_maxfilesize_large',
+ 'curlopt_maxfilesize',
+ 'curlopt_maxredirs',
+ 'curlopt_netrc_file',
+ 'curlopt_netrc',
+ 'curlopt_nobody',
+ 'curlopt_noprogress',
+ 'curlopt_port',
+ 'curlopt_post',
+ 'curlopt_postfields',
+ 'curlopt_postfieldsize_large',
+ 'curlopt_postfieldsize',
+ 'curlopt_postquote',
+ 'curlopt_prequote',
+ 'curlopt_proxy',
+ 'curlopt_proxyauth',
+ 'curlopt_proxyport',
+ 'curlopt_proxytype',
+ 'curlopt_proxyuserpwd',
+ 'curlopt_put',
+ 'curlopt_quote',
+ 'curlopt_random_file',
+ 'curlopt_range',
+ 'curlopt_readdata',
+ 'curlopt_referer',
+ 'curlopt_resume_from_large',
+ 'curlopt_resume_from',
+ 'curlopt_ssl_cipher_list',
+ 'curlopt_ssl_verifyhost',
+ 'curlopt_ssl_verifypeer',
+ 'curlopt_sslcert',
+ 'curlopt_sslcerttype',
+ 'curlopt_sslengine_default',
+ 'curlopt_sslengine',
+ 'curlopt_sslkey',
+ 'curlopt_sslkeypasswd',
+ 'curlopt_sslkeytype',
+ 'curlopt_sslversion',
+ 'curlopt_tcp_nodelay',
+ 'curlopt_timecondition',
+ 'curlopt_timeout',
+ 'curlopt_timevalue',
+ 'curlopt_transfertext',
+ 'curlopt_unrestricted_auth',
+ 'curlopt_upload',
+ 'curlopt_url',
+ 'curlopt_use_ssl',
+ 'curlopt_useragent',
+ 'curlopt_userpwd',
+ 'curlopt_verbose',
+ 'curlopt_writedata',
+ 'curlproxy_http',
+ 'curlproxy_socks4',
+ 'curlproxy_socks5',
+ 'database_adddefaultsqlitehost',
+ 'database_database',
+ 'database_initialize',
+ 'database_name',
+ 'database_qs',
+ 'database_table_database_tables',
+ 'database_table_datasource_databases',
+ 'database_table_datasource_hosts',
+ 'database_table_datasources',
+ 'database_table_table_fields',
+ 'database_util_cleanpath',
+ 'dbgp_stop_stack_name',
+ 'debugging_break',
+ 'debugging_breakpoint_get',
+ 'debugging_breakpoint_list',
+ 'debugging_breakpoint_remove',
+ 'debugging_breakpoint_set',
+ 'debugging_breakpoint_update',
+ 'debugging_context_locals',
+ 'debugging_context_self',
+ 'debugging_context_vars',
+ 'debugging_detach',
+ 'debugging_enabled',
+ 'debugging_get_context',
+ 'debugging_get_stack',
+ 'debugging_run',
+ 'debugging_step_in',
+ 'debugging_step_out',
+ 'debugging_step_over',
+ 'debugging_stop',
+ 'debugging_terminate',
+ 'decimal_random',
+ 'decompress',
+ 'decrypt_blowfish',
+ 'define_atbegin',
+ 'define_atend',
+ 'dns_default',
+ 'dns_lookup',
+ 'document',
+ 'email_attachment_mime_type',
+ 'email_batch',
+ 'email_digestchallenge',
+ 'email_digestresponse',
+ 'email_extract',
+ 'email_findemails',
+ 'email_fix_address_list',
+ 'email_fix_address',
+ 'email_fs_error_clean',
+ 'email_immediate',
+ 'email_initialize',
+ 'email_merge',
+ 'email_mxlookup',
+ 'email_pop_priv_extract',
+ 'email_pop_priv_quote',
+ 'email_pop_priv_substring',
+ 'email_queue',
+ 'email_result',
+ 'email_safeemail',
+ 'email_send',
+ 'email_status',
+ 'email_token',
+ 'email_translatebreakstocrlf',
+ 'encode_qheader',
+ 'encoding_iso88591',
+ 'encoding_utf8',
+ 'encrypt_blowfish',
+ 'encrypt_crammd5',
+ 'encrypt_hmac',
+ 'encrypt_md5',
+ 'eol',
+ 'eq',
+ 'error_code_aborted',
+ 'error_code_dividebyzero',
+ 'error_code_filenotfound',
+ 'error_code_invalidparameter',
+ 'error_code_methodnotfound',
+ 'error_code_networkerror',
+ 'error_code_noerror',
+ 'error_code_resnotfound',
+ 'error_code_runtimeassertion',
+ 'error_code',
+ 'error_msg_aborted',
+ 'error_msg_dividebyzero',
+ 'error_msg_filenotfound',
+ 'error_msg_invalidparameter',
+ 'error_msg_methodnotfound',
+ 'error_msg_networkerror',
+ 'error_msg_noerror',
+ 'error_msg_resnotfound',
+ 'error_msg_runtimeassertion',
+ 'error_msg',
+ 'error_obj',
+ 'error_pop',
+ 'error_push',
+ 'error_reset',
+ 'error_stack',
+ 'escape_tag',
+ 'evdns_resolve_ipv4',
+ 'evdns_resolve_ipv6',
+ 'evdns_resolve_reverse_ipv6',
+ 'evdns_resolve_reverse',
+ 'ew',
+ 'fail_if',
+ 'fail_ifnot',
+ 'fail_now',
+ 'fail',
+ 'failure_clear',
+ 'fastcgi_createfcgirequest',
+ 'fastcgi_handlecon',
+ 'fastcgi_handlereq',
+ 'fastcgi_initialize',
+ 'fastcgi_initiate_request',
+ 'fcgi_abort_request',
+ 'fcgi_authorize',
+ 'fcgi_begin_request',
+ 'fcgi_bodychunksize',
+ 'fcgi_cant_mpx_conn',
+ 'fcgi_data',
+ 'fcgi_end_request',
+ 'fcgi_filter',
+ 'fcgi_get_values_result',
+ 'fcgi_get_values',
+ 'fcgi_keep_conn',
+ 'fcgi_makeendrequestbody',
+ 'fcgi_makestdoutbody',
+ 'fcgi_max_conns',
+ 'fcgi_max_reqs',
+ 'fcgi_mpxs_conns',
+ 'fcgi_null_request_id',
+ 'fcgi_overloaded',
+ 'fcgi_params',
+ 'fcgi_read_timeout_seconds',
+ 'fcgi_readparam',
+ 'fcgi_request_complete',
+ 'fcgi_responder',
+ 'fcgi_stderr',
+ 'fcgi_stdin',
+ 'fcgi_stdout',
+ 'fcgi_unknown_role',
+ 'fcgi_unknown_type',
+ 'fcgi_version_1',
+ 'fcgi_x_stdin',
+ 'field_name',
+ 'field_names',
+ 'field',
+ 'file_copybuffersize',
+ 'file_defaultencoding',
+ 'file_forceroot',
+ 'file_modechar',
+ 'file_modeline',
+ 'file_stderr',
+ 'file_stdin',
+ 'file_stdout',
+ 'file_tempfile',
+ 'filemakerds_initialize',
+ 'filemakerds',
+ 'found_count',
+ 'ft',
+ 'ftp_deletefile',
+ 'ftp_getdata',
+ 'ftp_getfile',
+ 'ftp_getlisting',
+ 'ftp_putdata',
+ 'ftp_putfile',
+ 'full',
+ 'generateforeach',
+ 'gt',
+ 'gte',
+ 'handle_failure',
+ 'handle',
+ 'hash_primes',
+ 'html_comment',
+ 'http_char_colon',
+ 'http_char_cr',
+ 'http_char_htab',
+ 'http_char_lf',
+ 'http_char_question',
+ 'http_char_space',
+ 'http_default_files',
+ 'http_read_headers',
+ 'http_read_timeout_secs',
+ 'http_server_apps_path',
+ 'http_server_request_logger',
+ 'if_empty',
+ 'if_false',
+ 'if_null',
+ 'if_true',
+ 'include_cache_compare',
+ 'include_currentpath',
+ 'include_filepath',
+ 'include_localpath',
+ 'include_once',
+ 'include_path',
+ 'include_raw',
+ 'include_url',
+ 'include',
+ 'includes',
+ 'inline_colinfo_name_pos',
+ 'inline_colinfo_type_pos',
+ 'inline_colinfo_valuelist_pos',
+ 'inline_columninfo_pos',
+ 'inline_foundcount_pos',
+ 'inline_namedget',
+ 'inline_namedput',
+ 'inline_resultrows_pos',
+ 'inline_scopeget',
+ 'inline_scopepop',
+ 'inline_scopepush',
+ 'inline',
+ 'integer_bitor',
+ 'integer_random',
+ 'io_dir_dt_blk',
+ 'io_dir_dt_chr',
+ 'io_dir_dt_dir',
+ 'io_dir_dt_fifo',
+ 'io_dir_dt_lnk',
+ 'io_dir_dt_reg',
+ 'io_dir_dt_sock',
+ 'io_dir_dt_unknown',
+ 'io_dir_dt_wht',
+ 'io_file_access',
+ 'io_file_chdir',
+ 'io_file_chmod',
+ 'io_file_chown',
+ 'io_file_dirname',
+ 'io_file_f_dupfd',
+ 'io_file_f_getfd',
+ 'io_file_f_getfl',
+ 'io_file_f_getlk',
+ 'io_file_f_rdlck',
+ 'io_file_f_setfd',
+ 'io_file_f_setfl',
+ 'io_file_f_setlk',
+ 'io_file_f_setlkw',
+ 'io_file_f_test',
+ 'io_file_f_tlock',
+ 'io_file_f_ulock',
+ 'io_file_f_unlck',
+ 'io_file_f_wrlck',
+ 'io_file_fd_cloexec',
+ 'io_file_fioasync',
+ 'io_file_fioclex',
+ 'io_file_fiodtype',
+ 'io_file_fiogetown',
+ 'io_file_fionbio',
+ 'io_file_fionclex',
+ 'io_file_fionread',
+ 'io_file_fiosetown',
+ 'io_file_getcwd',
+ 'io_file_lchown',
+ 'io_file_link',
+ 'io_file_lockf',
+ 'io_file_lstat_atime',
+ 'io_file_lstat_mode',
+ 'io_file_lstat_mtime',
+ 'io_file_lstat_size',
+ 'io_file_mkdir',
+ 'io_file_mkfifo',
+ 'io_file_mkstemp',
+ 'io_file_o_append',
+ 'io_file_o_async',
+ 'io_file_o_creat',
+ 'io_file_o_excl',
+ 'io_file_o_exlock',
+ 'io_file_o_fsync',
+ 'io_file_o_nofollow',
+ 'io_file_o_nonblock',
+ 'io_file_o_rdonly',
+ 'io_file_o_rdwr',
+ 'io_file_o_shlock',
+ 'io_file_o_sync',
+ 'io_file_o_trunc',
+ 'io_file_o_wronly',
+ 'io_file_pipe',
+ 'io_file_readlink',
+ 'io_file_realpath',
+ 'io_file_remove',
+ 'io_file_rename',
+ 'io_file_rmdir',
+ 'io_file_s_ifblk',
+ 'io_file_s_ifchr',
+ 'io_file_s_ifdir',
+ 'io_file_s_ififo',
+ 'io_file_s_iflnk',
+ 'io_file_s_ifmt',
+ 'io_file_s_ifreg',
+ 'io_file_s_ifsock',
+ 'io_file_s_irgrp',
+ 'io_file_s_iroth',
+ 'io_file_s_irusr',
+ 'io_file_s_irwxg',
+ 'io_file_s_irwxo',
+ 'io_file_s_irwxu',
+ 'io_file_s_isgid',
+ 'io_file_s_isuid',
+ 'io_file_s_isvtx',
+ 'io_file_s_iwgrp',
+ 'io_file_s_iwoth',
+ 'io_file_s_iwusr',
+ 'io_file_s_ixgrp',
+ 'io_file_s_ixoth',
+ 'io_file_s_ixusr',
+ 'io_file_seek_cur',
+ 'io_file_seek_end',
+ 'io_file_seek_set',
+ 'io_file_stat_atime',
+ 'io_file_stat_mode',
+ 'io_file_stat_mtime',
+ 'io_file_stat_size',
+ 'io_file_stderr',
+ 'io_file_stdin',
+ 'io_file_stdout',
+ 'io_file_symlink',
+ 'io_file_tempnam',
+ 'io_file_truncate',
+ 'io_file_umask',
+ 'io_file_unlink',
+ 'io_net_accept',
+ 'io_net_af_inet',
+ 'io_net_af_inet6',
+ 'io_net_af_unix',
+ 'io_net_bind',
+ 'io_net_connect',
+ 'io_net_getpeername',
+ 'io_net_getsockname',
+ 'io_net_ipproto_ip',
+ 'io_net_ipproto_udp',
+ 'io_net_listen',
+ 'io_net_msg_oob',
+ 'io_net_msg_peek',
+ 'io_net_msg_waitall',
+ 'io_net_recv',
+ 'io_net_recvfrom',
+ 'io_net_send',
+ 'io_net_sendto',
+ 'io_net_shut_rd',
+ 'io_net_shut_rdwr',
+ 'io_net_shut_wr',
+ 'io_net_shutdown',
+ 'io_net_so_acceptconn',
+ 'io_net_so_broadcast',
+ 'io_net_so_debug',
+ 'io_net_so_dontroute',
+ 'io_net_so_error',
+ 'io_net_so_keepalive',
+ 'io_net_so_linger',
+ 'io_net_so_oobinline',
+ 'io_net_so_rcvbuf',
+ 'io_net_so_rcvlowat',
+ 'io_net_so_rcvtimeo',
+ 'io_net_so_reuseaddr',
+ 'io_net_so_sndbuf',
+ 'io_net_so_sndlowat',
+ 'io_net_so_sndtimeo',
+ 'io_net_so_timestamp',
+ 'io_net_so_type',
+ 'io_net_so_useloopback',
+ 'io_net_sock_dgram',
+ 'io_net_sock_raw',
+ 'io_net_sock_rdm',
+ 'io_net_sock_seqpacket',
+ 'io_net_sock_stream',
+ 'io_net_socket',
+ 'io_net_sol_socket',
+ 'io_net_ssl_accept',
+ 'io_net_ssl_begin',
+ 'io_net_ssl_connect',
+ 'io_net_ssl_end',
+ 'io_net_ssl_error',
+ 'io_net_ssl_errorstring',
+ 'io_net_ssl_funcerrorstring',
+ 'io_net_ssl_liberrorstring',
+ 'io_net_ssl_read',
+ 'io_net_ssl_reasonerrorstring',
+ 'io_net_ssl_setacceptstate',
+ 'io_net_ssl_setconnectstate',
+ 'io_net_ssl_setverifylocations',
+ 'io_net_ssl_shutdown',
+ 'io_net_ssl_usecertificatechainfile',
+ 'io_net_ssl_useprivatekeyfile',
+ 'io_net_ssl_write',
+ 'java_jvm_create',
+ 'java_jvm_getenv',
+ 'jdbc_initialize',
+ 'json_back_slash',
+ 'json_back_space',
+ 'json_close_array',
+ 'json_close_object',
+ 'json_colon',
+ 'json_comma',
+ 'json_consume_array',
+ 'json_consume_object',
+ 'json_consume_string',
+ 'json_consume_token',
+ 'json_cr',
+ 'json_debug',
+ 'json_deserialize',
+ 'json_e_lower',
+ 'json_e_upper',
+ 'json_f_lower',
+ 'json_form_feed',
+ 'json_forward_slash',
+ 'json_lf',
+ 'json_n_lower',
+ 'json_negative',
+ 'json_open_array',
+ 'json_open_object',
+ 'json_period',
+ 'json_positive',
+ 'json_quote_double',
+ 'json_rpccall',
+ 'json_serialize',
+ 'json_t_lower',
+ 'json_tab',
+ 'json_white_space',
+ 'keycolumn_name',
+ 'keycolumn_value',
+ 'keyfield_name',
+ 'keyfield_value',
+ 'lasso_currentaction',
+ 'lasso_errorreporting',
+ 'lasso_executiontimelimit',
+ 'lasso_methodexists',
+ 'lasso_tagexists',
+ 'lasso_uniqueid',
+ 'lasso_version',
+ 'lassoapp_current_app',
+ 'lassoapp_current_include',
+ 'lassoapp_do_with_include',
+ 'lassoapp_exists',
+ 'lassoapp_find_missing_file',
+ 'lassoapp_format_mod_date',
+ 'lassoapp_get_capabilities_name',
+ 'lassoapp_include_current',
+ 'lassoapp_include',
+ 'lassoapp_initialize_db',
+ 'lassoapp_initialize',
+ 'lassoapp_invoke_resource',
+ 'lassoapp_issourcefileextension',
+ 'lassoapp_link',
+ 'lassoapp_load_module',
+ 'lassoapp_mime_get',
+ 'lassoapp_mime_type_appcache',
+ 'lassoapp_mime_type_css',
+ 'lassoapp_mime_type_csv',
+ 'lassoapp_mime_type_doc',
+ 'lassoapp_mime_type_docx',
+ 'lassoapp_mime_type_eof',
+ 'lassoapp_mime_type_eot',
+ 'lassoapp_mime_type_gif',
+ 'lassoapp_mime_type_html',
+ 'lassoapp_mime_type_ico',
+ 'lassoapp_mime_type_jpg',
+ 'lassoapp_mime_type_js',
+ 'lassoapp_mime_type_lasso',
+ 'lassoapp_mime_type_map',
+ 'lassoapp_mime_type_pdf',
+ 'lassoapp_mime_type_png',
+ 'lassoapp_mime_type_ppt',
+ 'lassoapp_mime_type_rss',
+ 'lassoapp_mime_type_svg',
+ 'lassoapp_mime_type_swf',
+ 'lassoapp_mime_type_tif',
+ 'lassoapp_mime_type_ttf',
+ 'lassoapp_mime_type_txt',
+ 'lassoapp_mime_type_woff',
+ 'lassoapp_mime_type_xaml',
+ 'lassoapp_mime_type_xap',
+ 'lassoapp_mime_type_xbap',
+ 'lassoapp_mime_type_xhr',
+ 'lassoapp_mime_type_xml',
+ 'lassoapp_mime_type_zip',
+ 'lassoapp_path_to_method_name',
+ 'lassoapp_settingsdb',
+ 'layout_name',
+ 'lcapi_datasourceadd',
+ 'lcapi_datasourcecloseconnection',
+ 'lcapi_datasourcedelete',
+ 'lcapi_datasourceduplicate',
+ 'lcapi_datasourceexecsql',
+ 'lcapi_datasourcefindall',
+ 'lcapi_datasourceimage',
+ 'lcapi_datasourceinfo',
+ 'lcapi_datasourceinit',
+ 'lcapi_datasourcematchesname',
+ 'lcapi_datasourcenames',
+ 'lcapi_datasourcenothing',
+ 'lcapi_datasourceopand',
+ 'lcapi_datasourceopany',
+ 'lcapi_datasourceopbw',
+ 'lcapi_datasourceopct',
+ 'lcapi_datasourceopeq',
+ 'lcapi_datasourceopew',
+ 'lcapi_datasourceopft',
+ 'lcapi_datasourceopgt',
+ 'lcapi_datasourceopgteq',
+ 'lcapi_datasourceopin',
+ 'lcapi_datasourceoplt',
+ 'lcapi_datasourceoplteq',
+ 'lcapi_datasourceopnbw',
+ 'lcapi_datasourceopnct',
+ 'lcapi_datasourceopneq',
+ 'lcapi_datasourceopnew',
+ 'lcapi_datasourceopnin',
+ 'lcapi_datasourceopno',
+ 'lcapi_datasourceopnot',
+ 'lcapi_datasourceopnrx',
+ 'lcapi_datasourceopor',
+ 'lcapi_datasourceoprx',
+ 'lcapi_datasourcepreparesql',
+ 'lcapi_datasourceprotectionnone',
+ 'lcapi_datasourceprotectionreadonly',
+ 'lcapi_datasourcerandom',
+ 'lcapi_datasourceschemanames',
+ 'lcapi_datasourcescripts',
+ 'lcapi_datasourcesearch',
+ 'lcapi_datasourcesortascending',
+ 'lcapi_datasourcesortcustom',
+ 'lcapi_datasourcesortdescending',
+ 'lcapi_datasourcetablenames',
+ 'lcapi_datasourceterm',
+ 'lcapi_datasourcetickle',
+ 'lcapi_datasourcetypeblob',
+ 'lcapi_datasourcetypeboolean',
+ 'lcapi_datasourcetypedate',
+ 'lcapi_datasourcetypedecimal',
+ 'lcapi_datasourcetypeinteger',
+ 'lcapi_datasourcetypestring',
+ 'lcapi_datasourceunpreparesql',
+ 'lcapi_datasourceupdate',
+ 'lcapi_fourchartointeger',
+ 'lcapi_listdatasources',
+ 'lcapi_loadmodule',
+ 'lcapi_loadmodules',
+ 'lcapi_updatedatasourceslist',
+ 'ldap_scope_base',
+ 'ldap_scope_children',
+ 'ldap_scope_onelevel',
+ 'ldap_scope_subtree',
+ 'library_once',
+ 'library',
+ 'ljapi_initialize',
+ 'locale_availablelocales',
+ 'locale_canada',
+ 'locale_canadafrench',
+ 'locale_china',
+ 'locale_chinese',
+ 'locale_default',
+ 'locale_english',
+ 'locale_format_style_date_time',
+ 'locale_format_style_default',
+ 'locale_format_style_full',
+ 'locale_format_style_long',
+ 'locale_format_style_medium',
+ 'locale_format_style_none',
+ 'locale_format_style_short',
+ 'locale_format',
+ 'locale_france',
+ 'locale_french',
+ 'locale_german',
+ 'locale_germany',
+ 'locale_isocountries',
+ 'locale_isolanguages',
+ 'locale_italian',
+ 'locale_italy',
+ 'locale_japan',
+ 'locale_japanese',
+ 'locale_korea',
+ 'locale_korean',
+ 'locale_prc',
+ 'locale_setdefault',
+ 'locale_simplifiedchinese',
+ 'locale_taiwan',
+ 'locale_traditionalchinese',
+ 'locale_uk',
+ 'locale_us',
+ 'log_always',
+ 'log_critical',
+ 'log_deprecated',
+ 'log_destination_console',
+ 'log_destination_database',
+ 'log_destination_file',
+ 'log_detail',
+ 'log_initialize',
+ 'log_level_critical',
+ 'log_level_deprecated',
+ 'log_level_detail',
+ 'log_level_sql',
+ 'log_level_warning',
+ 'log_max_file_size',
+ 'log_setdestination',
+ 'log_sql',
+ 'log_trim_file_size',
+ 'log_warning',
+ 'log',
+ 'loop_abort',
+ 'loop_continue',
+ 'loop_count',
+ 'loop_key_pop',
+ 'loop_key_push',
+ 'loop_key',
+ 'loop_pop',
+ 'loop_push',
+ 'loop_value_pop',
+ 'loop_value_push',
+ 'loop_value',
+ 'loop',
+ 'lt',
+ 'lte',
+ 'main_thread_only',
+ 'max',
+ 'maxrecords_value',
+ 'median',
+ 'method_name',
+ 'micros',
+ 'millis',
+ 'min',
+ 'minimal',
+ 'mongo_insert_continue_on_error',
+ 'mongo_insert_no_validate',
+ 'mongo_insert_none',
+ 'mongo_query_await_data',
+ 'mongo_query_exhaust',
+ 'mongo_query_no_cursor_timeout',
+ 'mongo_query_none',
+ 'mongo_query_oplog_replay',
+ 'mongo_query_partial',
+ 'mongo_query_slave_ok',
+ 'mongo_query_tailable_cursor',
+ 'mongo_remove_none',
+ 'mongo_remove_single_remove',
+ 'mongo_update_multi_update',
+ 'mongo_update_no_validate',
+ 'mongo_update_none',
+ 'mongo_update_upsert',
+ 'mustache_compile_file',
+ 'mustache_compile_string',
+ 'mustache_include',
+ 'mysqlds',
+ 'namespace_global',
+ 'namespace_import',
+ 'namespace_using',
+ 'nbw',
+ 'ncn',
+ 'neq',
+ 'net_connectinprogress',
+ 'net_connectok',
+ 'net_typessl',
+ 'net_typessltcp',
+ 'net_typessludp',
+ 'net_typetcp',
+ 'net_typeudp',
+ 'net_waitread',
+ 'net_waittimeout',
+ 'net_waitwrite',
+ 'new',
+ 'none',
+ 'nrx',
+ 'nslookup',
+ 'odbc_session_driver_mssql',
+ 'odbc',
+ 'output_none',
+ 'output',
+ 'pdf_package',
+ 'pdf_rectangle',
+ 'pdf_serve',
+ 'pi',
+ 'portal',
+ 'postgresql',
+ 'process',
+ 'protect_now',
+ 'protect',
+ 'queriable_average',
+ 'queriable_defaultcompare',
+ 'queriable_do',
+ 'queriable_internal_combinebindings',
+ 'queriable_max',
+ 'queriable_min',
+ 'queriable_qsort',
+ 'queriable_reversecompare',
+ 'queriable_sum',
+ 'random_seed',
+ 'range',
+ 'records_array',
+ 'records_map',
+ 'records',
+ 'redirect_url',
+ 'referer_url',
+ 'referrer_url',
+ 'register_thread',
+ 'register',
+ 'response_filepath',
+ 'response_localpath',
+ 'response_path',
+ 'response_realm',
+ 'response_root',
+ 'resultset_count',
+ 'resultset',
+ 'resultsets',
+ 'rows_array',
+ 'rows_impl',
+ 'rows',
+ 'rx',
+ 'schema_name',
+ 'security_database',
+ 'security_default_realm',
+ 'security_initialize',
+ 'security_table_groups',
+ 'security_table_ug_map',
+ 'security_table_users',
+ 'selected',
+ 'series',
+ 'server_admin',
+ 'server_ip',
+ 'server_name',
+ 'server_port',
+ 'server_protocol',
+ 'server_push',
+ 'server_signature',
+ 'server_software',
+ 'session_abort',
+ 'session_addvar',
+ 'session_decorate',
+ 'session_deleteexpired',
+ 'session_end',
+ 'session_getdefaultdriver',
+ 'session_id',
+ 'session_initialize',
+ 'session_removevar',
+ 'session_result',
+ 'session_setdefaultdriver',
+ 'session_start',
+ 'shown_count',
+ 'shown_first',
+ 'shown_last',
+ 'site_id',
+ 'site_name',
+ 'skiprecords_value',
+ 'sleep',
+ 'split_thread',
+ 'sqlite_abort',
+ 'sqlite_auth',
+ 'sqlite_blob',
+ 'sqlite_busy',
+ 'sqlite_cantopen',
+ 'sqlite_constraint',
+ 'sqlite_corrupt',
+ 'sqlite_createdb',
+ 'sqlite_done',
+ 'sqlite_empty',
+ 'sqlite_error',
+ 'sqlite_float',
+ 'sqlite_format',
+ 'sqlite_full',
+ 'sqlite_integer',
+ 'sqlite_internal',
+ 'sqlite_interrupt',
+ 'sqlite_ioerr',
+ 'sqlite_locked',
+ 'sqlite_mismatch',
+ 'sqlite_misuse',
+ 'sqlite_nolfs',
+ 'sqlite_nomem',
+ 'sqlite_notadb',
+ 'sqlite_notfound',
+ 'sqlite_null',
+ 'sqlite_ok',
+ 'sqlite_perm',
+ 'sqlite_protocol',
+ 'sqlite_range',
+ 'sqlite_readonly',
+ 'sqlite_row',
+ 'sqlite_schema',
+ 'sqlite_setsleepmillis',
+ 'sqlite_setsleeptries',
+ 'sqlite_text',
+ 'sqlite_toobig',
+ 'sqliteconnector',
+ 'staticarray_join',
+ 'stdout',
+ 'stdoutnl',
+ 'string_validcharset',
+ 'suspend',
+ 'sys_appspath',
+ 'sys_chroot',
+ 'sys_clock',
+ 'sys_clockspersec',
+ 'sys_credits',
+ 'sys_databasespath',
+ 'sys_detach_exec',
+ 'sys_difftime',
+ 'sys_dll_ext',
+ 'sys_drand48',
+ 'sys_environ',
+ 'sys_eol',
+ 'sys_erand48',
+ 'sys_errno',
+ 'sys_exec_pid_to_os_pid',
+ 'sys_exec',
+ 'sys_exit',
+ 'sys_fork',
+ 'sys_garbagecollect',
+ 'sys_getbytessincegc',
+ 'sys_getchar',
+ 'sys_getegid',
+ 'sys_getenv',
+ 'sys_geteuid',
+ 'sys_getgid',
+ 'sys_getgrnam',
+ 'sys_getheapfreebytes',
+ 'sys_getheapsize',
+ 'sys_getlogin',
+ 'sys_getpid',
+ 'sys_getppid',
+ 'sys_getpwnam',
+ 'sys_getpwuid',
+ 'sys_getstartclock',
+ 'sys_getthreadcount',
+ 'sys_getuid',
+ 'sys_growheapby',
+ 'sys_homepath',
+ 'sys_is_full_path',
+ 'sys_is_windows',
+ 'sys_isfullpath',
+ 'sys_iswindows',
+ 'sys_iterate',
+ 'sys_jrand48',
+ 'sys_kill_exec',
+ 'sys_kill',
+ 'sys_lcong48',
+ 'sys_librariespath',
+ 'sys_listtraits',
+ 'sys_listtypes',
+ 'sys_listunboundmethods',
+ 'sys_loadlibrary',
+ 'sys_lrand48',
+ 'sys_masterhomepath',
+ 'sys_mrand48',
+ 'sys_nrand48',
+ 'sys_pid_exec',
+ 'sys_pointersize',
+ 'sys_rand',
+ 'sys_random',
+ 'sys_seed48',
+ 'sys_setenv',
+ 'sys_setgid',
+ 'sys_setsid',
+ 'sys_setuid',
+ 'sys_sigabrt',
+ 'sys_sigalrm',
+ 'sys_sigbus',
+ 'sys_sigchld',
+ 'sys_sigcont',
+ 'sys_sigfpe',
+ 'sys_sighup',
+ 'sys_sigill',
+ 'sys_sigint',
+ 'sys_sigkill',
+ 'sys_sigpipe',
+ 'sys_sigprof',
+ 'sys_sigquit',
+ 'sys_sigsegv',
+ 'sys_sigstop',
+ 'sys_sigsys',
+ 'sys_sigterm',
+ 'sys_sigtrap',
+ 'sys_sigtstp',
+ 'sys_sigttin',
+ 'sys_sigttou',
+ 'sys_sigurg',
+ 'sys_sigusr1',
+ 'sys_sigusr2',
+ 'sys_sigvtalrm',
+ 'sys_sigxcpu',
+ 'sys_sigxfsz',
+ 'sys_srand',
+ 'sys_srand48',
+ 'sys_srandom',
+ 'sys_strerror',
+ 'sys_supportpath',
+ 'sys_test_exec',
+ 'sys_time',
+ 'sys_uname',
+ 'sys_unsetenv',
+ 'sys_usercapimodulepath',
+ 'sys_userstartuppath',
+ 'sys_version',
+ 'sys_wait_exec',
+ 'sys_waitpid',
+ 'sys_wcontinued',
+ 'sys_while',
+ 'sys_wnohang',
+ 'sys_wuntraced',
+ 'table_name',
+ 'tag_exists',
+ 'tag_name',
+ 'thread_var_get',
+ 'thread_var_pop',
+ 'thread_var_push',
+ 'threadvar_find',
+ 'threadvar_get',
+ 'threadvar_set_asrt',
+ 'threadvar_set',
+ 'timer',
+ 'token_value',
+ 'treemap',
+ 'u_lb_alphabetic',
+ 'u_lb_ambiguous',
+ 'u_lb_break_after',
+ 'u_lb_break_before',
+ 'u_lb_break_both',
+ 'u_lb_break_symbols',
+ 'u_lb_carriage_return',
+ 'u_lb_close_punctuation',
+ 'u_lb_combining_mark',
+ 'u_lb_complex_context',
+ 'u_lb_contingent_break',
+ 'u_lb_exclamation',
+ 'u_lb_glue',
+ 'u_lb_h2',
+ 'u_lb_h3',
+ 'u_lb_hyphen',
+ 'u_lb_ideographic',
+ 'u_lb_infix_numeric',
+ 'u_lb_inseparable',
+ 'u_lb_jl',
+ 'u_lb_jt',
+ 'u_lb_jv',
+ 'u_lb_line_feed',
+ 'u_lb_mandatory_break',
+ 'u_lb_next_line',
+ 'u_lb_nonstarter',
+ 'u_lb_numeric',
+ 'u_lb_open_punctuation',
+ 'u_lb_postfix_numeric',
+ 'u_lb_prefix_numeric',
+ 'u_lb_quotation',
+ 'u_lb_space',
+ 'u_lb_surrogate',
+ 'u_lb_unknown',
+ 'u_lb_word_joiner',
+ 'u_lb_zwspace',
+ 'u_nt_decimal',
+ 'u_nt_digit',
+ 'u_nt_none',
+ 'u_nt_numeric',
+ 'u_sb_aterm',
+ 'u_sb_close',
+ 'u_sb_format',
+ 'u_sb_lower',
+ 'u_sb_numeric',
+ 'u_sb_oletter',
+ 'u_sb_other',
+ 'u_sb_sep',
+ 'u_sb_sp',
+ 'u_sb_sterm',
+ 'u_sb_upper',
+ 'u_wb_aletter',
+ 'u_wb_extendnumlet',
+ 'u_wb_format',
+ 'u_wb_katakana',
+ 'u_wb_midletter',
+ 'u_wb_midnum',
+ 'u_wb_numeric',
+ 'u_wb_other',
+ 'ucal_ampm',
+ 'ucal_dayofmonth',
+ 'ucal_dayofweek',
+ 'ucal_dayofweekinmonth',
+ 'ucal_dayofyear',
+ 'ucal_daysinfirstweek',
+ 'ucal_dowlocal',
+ 'ucal_dstoffset',
+ 'ucal_era',
+ 'ucal_extendedyear',
+ 'ucal_firstdayofweek',
+ 'ucal_hour',
+ 'ucal_hourofday',
+ 'ucal_julianday',
+ 'ucal_lenient',
+ 'ucal_listtimezones',
+ 'ucal_millisecond',
+ 'ucal_millisecondsinday',
+ 'ucal_minute',
+ 'ucal_month',
+ 'ucal_second',
+ 'ucal_weekofmonth',
+ 'ucal_weekofyear',
+ 'ucal_year',
+ 'ucal_yearwoy',
+ 'ucal_zoneoffset',
+ 'uchar_age',
+ 'uchar_alphabetic',
+ 'uchar_ascii_hex_digit',
+ 'uchar_bidi_class',
+ 'uchar_bidi_control',
+ 'uchar_bidi_mirrored',
+ 'uchar_bidi_mirroring_glyph',
+ 'uchar_block',
+ 'uchar_canonical_combining_class',
+ 'uchar_case_folding',
+ 'uchar_case_sensitive',
+ 'uchar_dash',
+ 'uchar_decomposition_type',
+ 'uchar_default_ignorable_code_point',
+ 'uchar_deprecated',
+ 'uchar_diacritic',
+ 'uchar_east_asian_width',
+ 'uchar_extender',
+ 'uchar_full_composition_exclusion',
+ 'uchar_general_category_mask',
+ 'uchar_general_category',
+ 'uchar_grapheme_base',
+ 'uchar_grapheme_cluster_break',
+ 'uchar_grapheme_extend',
+ 'uchar_grapheme_link',
+ 'uchar_hangul_syllable_type',
+ 'uchar_hex_digit',
+ 'uchar_hyphen',
+ 'uchar_id_continue',
+ 'uchar_ideographic',
+ 'uchar_ids_binary_operator',
+ 'uchar_ids_trinary_operator',
+ 'uchar_iso_comment',
+ 'uchar_join_control',
+ 'uchar_joining_group',
+ 'uchar_joining_type',
+ 'uchar_lead_canonical_combining_class',
+ 'uchar_line_break',
+ 'uchar_logical_order_exception',
+ 'uchar_lowercase_mapping',
+ 'uchar_lowercase',
+ 'uchar_math',
+ 'uchar_name',
+ 'uchar_nfc_inert',
+ 'uchar_nfc_quick_check',
+ 'uchar_nfd_inert',
+ 'uchar_nfd_quick_check',
+ 'uchar_nfkc_inert',
+ 'uchar_nfkc_quick_check',
+ 'uchar_nfkd_inert',
+ 'uchar_nfkd_quick_check',
+ 'uchar_noncharacter_code_point',
+ 'uchar_numeric_type',
+ 'uchar_numeric_value',
+ 'uchar_pattern_syntax',
+ 'uchar_pattern_white_space',
+ 'uchar_posix_alnum',
+ 'uchar_posix_blank',
+ 'uchar_posix_graph',
+ 'uchar_posix_print',
+ 'uchar_posix_xdigit',
+ 'uchar_quotation_mark',
+ 'uchar_radical',
+ 'uchar_s_term',
+ 'uchar_script',
+ 'uchar_segment_starter',
+ 'uchar_sentence_break',
+ 'uchar_simple_case_folding',
+ 'uchar_simple_lowercase_mapping',
+ 'uchar_simple_titlecase_mapping',
+ 'uchar_simple_uppercase_mapping',
+ 'uchar_soft_dotted',
+ 'uchar_terminal_punctuation',
+ 'uchar_titlecase_mapping',
+ 'uchar_trail_canonical_combining_class',
+ 'uchar_unicode_1_name',
+ 'uchar_unified_ideograph',
+ 'uchar_uppercase_mapping',
+ 'uchar_uppercase',
+ 'uchar_variation_selector',
+ 'uchar_white_space',
+ 'uchar_word_break',
+ 'uchar_xid_continue',
+ 'uncompress',
+ 'usage',
+ 'uuid_compare',
+ 'uuid_copy',
+ 'uuid_generate_random',
+ 'uuid_generate_time',
+ 'uuid_generate',
+ 'uuid_is_null',
+ 'uuid_parse',
+ 'uuid_unparse_lower',
+ 'uuid_unparse_upper',
+ 'uuid_unparse',
+ 'value_list',
+ 'value_listitem',
+ 'valuelistitem',
+ 'var_keys',
+ 'var_values',
+ 'wap_isenabled',
+ 'wap_maxbuttons',
+ 'wap_maxcolumns',
+ 'wap_maxhorzpixels',
+ 'wap_maxrows',
+ 'wap_maxvertpixels',
+ 'web_handlefcgirequest',
+ 'web_node_content_representation_css',
+ 'web_node_content_representation_html',
+ 'web_node_content_representation_js',
+ 'web_node_content_representation_xhr',
+ 'web_node_forpath',
+ 'web_nodes_initialize',
+ 'web_nodes_normalizeextension',
+ 'web_nodes_processcontentnode',
+ 'web_nodes_requesthandler',
+ 'web_response_nodesentry',
+ 'web_router_database',
+ 'web_router_initialize',
+ 'websocket_handler_timeout',
+ 'wexitstatus',
+ 'wifcontinued',
+ 'wifexited',
+ 'wifsignaled',
+ 'wifstopped',
+ 'wstopsig',
+ 'wtermsig',
+ 'xml_transform',
+ 'xml',
+ 'zip_add_dir',
+ 'zip_add',
+ 'zip_checkcons',
+ 'zip_close',
+ 'zip_cm_bzip2',
+ 'zip_cm_default',
+ 'zip_cm_deflate',
+ 'zip_cm_deflate64',
+ 'zip_cm_implode',
+ 'zip_cm_pkware_implode',
+ 'zip_cm_reduce_1',
+ 'zip_cm_reduce_2',
+ 'zip_cm_reduce_3',
+ 'zip_cm_reduce_4',
+ 'zip_cm_shrink',
+ 'zip_cm_store',
+ 'zip_create',
+ 'zip_delete',
+ 'zip_em_3des_112',
+ 'zip_em_3des_168',
+ 'zip_em_aes_128',
+ 'zip_em_aes_192',
+ 'zip_em_aes_256',
+ 'zip_em_des',
+ 'zip_em_none',
+ 'zip_em_rc2_old',
+ 'zip_em_rc2',
+ 'zip_em_rc4',
+ 'zip_em_trad_pkware',
+ 'zip_em_unknown',
+ 'zip_er_changed',
+ 'zip_er_close',
+ 'zip_er_compnotsupp',
+ 'zip_er_crc',
+ 'zip_er_deleted',
+ 'zip_er_eof',
+ 'zip_er_exists',
+ 'zip_er_incons',
+ 'zip_er_internal',
+ 'zip_er_inval',
+ 'zip_er_memory',
+ 'zip_er_multidisk',
+ 'zip_er_noent',
+ 'zip_er_nozip',
+ 'zip_er_ok',
+ 'zip_er_open',
+ 'zip_er_read',
+ 'zip_er_remove',
+ 'zip_er_rename',
+ 'zip_er_seek',
+ 'zip_er_tmpopen',
+ 'zip_er_write',
+ 'zip_er_zipclosed',
+ 'zip_er_zlib',
+ 'zip_error_get_sys_type',
+ 'zip_error_get',
+ 'zip_error_to_str',
+ 'zip_et_none',
+ 'zip_et_sys',
+ 'zip_et_zlib',
+ 'zip_excl',
+ 'zip_fclose',
+ 'zip_file_error_get',
+ 'zip_file_strerror',
+ 'zip_fl_compressed',
+ 'zip_fl_nocase',
+ 'zip_fl_nodir',
+ 'zip_fl_unchanged',
+ 'zip_fopen_index',
+ 'zip_fopen',
+ 'zip_fread',
+ 'zip_get_archive_comment',
+ 'zip_get_file_comment',
+ 'zip_get_name',
+ 'zip_get_num_files',
+ 'zip_name_locate',
+ 'zip_open',
+ 'zip_rename',
+ 'zip_replace',
+ 'zip_set_archive_comment',
+ 'zip_set_file_comment',
+ 'zip_stat_index',
+ 'zip_stat',
+ 'zip_strerror',
+ 'zip_unchange_all',
+ 'zip_unchange_archive',
+ 'zip_unchange',
+ 'zlib_version',
+ ),
+ 'Lasso 8 Tags': (
+ '__char',
+ '__sync_timestamp__',
+ '_admin_addgroup',
+ '_admin_adduser',
+ '_admin_defaultconnector',
+ '_admin_defaultconnectornames',
+ '_admin_defaultdatabase',
+ '_admin_defaultfield',
+ '_admin_defaultgroup',
+ '_admin_defaulthost',
+ '_admin_defaulttable',
+ '_admin_defaultuser',
+ '_admin_deleteconnector',
+ '_admin_deletedatabase',
+ '_admin_deletefield',
+ '_admin_deletegroup',
+ '_admin_deletehost',
+ '_admin_deletetable',
+ '_admin_deleteuser',
+ '_admin_duplicategroup',
+ '_admin_internaldatabase',
+ '_admin_listconnectors',
+ '_admin_listdatabases',
+ '_admin_listfields',
+ '_admin_listgroups',
+ '_admin_listhosts',
+ '_admin_listtables',
+ '_admin_listusers',
+ '_admin_refreshconnector',
+ '_admin_refreshsecurity',
+ '_admin_servicepath',
+ '_admin_updateconnector',
+ '_admin_updatedatabase',
+ '_admin_updatefield',
+ '_admin_updategroup',
+ '_admin_updatehost',
+ '_admin_updatetable',
+ '_admin_updateuser',
+ '_chartfx_activation_string',
+ '_chartfx_getchallengestring',
+ '_chop_args',
+ '_chop_mimes',
+ '_client_addr_old',
+ '_client_address_old',
+ '_client_ip_old',
+ '_database_names',
+ '_datasource_reload',
+ '_date_current',
+ '_date_format',
+ '_date_msec',
+ '_date_parse',
+ '_execution_timelimit',
+ '_file_chmod',
+ '_initialize',
+ '_jdbc_acceptsurl',
+ '_jdbc_debug',
+ '_jdbc_deletehost',
+ '_jdbc_driverclasses',
+ '_jdbc_driverinfo',
+ '_jdbc_metainfo',
+ '_jdbc_propertyinfo',
+ '_jdbc_setdriver',
+ '_lasso_param',
+ '_log_helper',
+ '_proc_noparam',
+ '_proc_withparam',
+ '_recursion_limit',
+ '_request_param',
+ '_security_binaryexpiration',
+ '_security_flushcaches',
+ '_security_isserialized',
+ '_security_serialexpiration',
+ '_srand',
+ '_strict_literals',
+ '_substring',
+ '_xmlrpc_exconverter',
+ '_xmlrpc_inconverter',
+ '_xmlrpc_xmlinconverter',
+ 'abort',
+ 'action_addinfo',
+ 'action_addrecord',
+ 'action_param',
+ 'action_params',
+ 'action_setfoundcount',
+ 'action_setrecordid',
+ 'action_settotalcount',
+ 'action_statement',
+ 'admin_allowedfileroots',
+ 'admin_changeuser',
+ 'admin_createuser',
+ 'admin_currentgroups',
+ 'admin_currentuserid',
+ 'admin_currentusername',
+ 'admin_getpref',
+ 'admin_groupassignuser',
+ 'admin_grouplistusers',
+ 'admin_groupremoveuser',
+ 'admin_lassoservicepath',
+ 'admin_listgroups',
+ 'admin_refreshlicensing',
+ 'admin_refreshsecurity',
+ 'admin_reloaddatasource',
+ 'admin_removepref',
+ 'admin_setpref',
+ 'admin_userexists',
+ 'admin_userlistgroups',
+ 'all',
+ 'and',
+ 'array',
+ 'array_iterator',
+ 'auth',
+ 'auth_admin',
+ 'auth_auth',
+ 'auth_custom',
+ 'auth_group',
+ 'auth_prompt',
+ 'auth_user',
+ 'base64',
+ 'bean',
+ 'bigint',
+ 'bom_utf16be',
+ 'bom_utf16le',
+ 'bom_utf32be',
+ 'bom_utf32le',
+ 'bom_utf8',
+ 'boolean',
+ 'bw',
+ 'bytes',
+ 'cache',
+ 'cache_delete',
+ 'cache_empty',
+ 'cache_exists',
+ 'cache_fetch',
+ 'cache_internal',
+ 'cache_maintenance',
+ 'cache_object',
+ 'cache_preferences',
+ 'cache_store',
+ 'case',
+ 'chartfx',
+ 'chartfx_records',
+ 'chartfx_serve',
+ 'checked',
+ 'choice_list',
+ 'choice_listitem',
+ 'choicelistitem',
+ 'cipher_decrypt',
+ 'cipher_digest',
+ 'cipher_encrypt',
+ 'cipher_hmac',
+ 'cipher_keylength',
+ 'cipher_list',
+ 'click_text',
+ 'client_addr',
+ 'client_address',
+ 'client_authorization',
+ 'client_browser',
+ 'client_contentlength',
+ 'client_contenttype',
+ 'client_cookielist',
+ 'client_cookies',
+ 'client_encoding',
+ 'client_formmethod',
+ 'client_getargs',
+ 'client_getparams',
+ 'client_headers',
+ 'client_ip',
+ 'client_ipfrominteger',
+ 'client_iptointeger',
+ 'client_password',
+ 'client_postargs',
+ 'client_postparams',
+ 'client_type',
+ 'client_url',
+ 'client_username',
+ 'cn',
+ 'column',
+ 'column_name',
+ 'column_names',
+ 'compare_beginswith',
+ 'compare_contains',
+ 'compare_endswith',
+ 'compare_equalto',
+ 'compare_greaterthan',
+ 'compare_greaterthanorequals',
+ 'compare_greaterthanorequls',
+ 'compare_lessthan',
+ 'compare_lessthanorequals',
+ 'compare_notbeginswith',
+ 'compare_notcontains',
+ 'compare_notendswith',
+ 'compare_notequalto',
+ 'compare_notregexp',
+ 'compare_regexp',
+ 'compare_strictequalto',
+ 'compare_strictnotequalto',
+ 'compiler_removecacheddoc',
+ 'compiler_setdefaultparserflags',
+ 'compress',
+ 'content_body',
+ 'content_encoding',
+ 'content_header',
+ 'content_type',
+ 'cookie',
+ 'cookie_set',
+ 'curl_ftp_getfile',
+ 'curl_ftp_getlisting',
+ 'curl_ftp_putfile',
+ 'curl_include_url',
+ 'currency',
+ 'database_changecolumn',
+ 'database_changefield',
+ 'database_createcolumn',
+ 'database_createfield',
+ 'database_createtable',
+ 'database_fmcontainer',
+ 'database_hostinfo',
+ 'database_inline',
+ 'database_name',
+ 'database_nameitem',
+ 'database_names',
+ 'database_realname',
+ 'database_removecolumn',
+ 'database_removefield',
+ 'database_removetable',
+ 'database_repeating',
+ 'database_repeating_valueitem',
+ 'database_repeatingvalueitem',
+ 'database_schemanameitem',
+ 'database_schemanames',
+ 'database_tablecolumn',
+ 'database_tablenameitem',
+ 'database_tablenames',
+ 'datasource_name',
+ 'datasource_register',
+ 'date',
+ 'date__date_current',
+ 'date__date_format',
+ 'date__date_msec',
+ 'date__date_parse',
+ 'date_add',
+ 'date_date',
+ 'date_difference',
+ 'date_duration',
+ 'date_format',
+ 'date_getcurrentdate',
+ 'date_getday',
+ 'date_getdayofweek',
+ 'date_gethour',
+ 'date_getlocaltimezone',
+ 'date_getminute',
+ 'date_getmonth',
+ 'date_getsecond',
+ 'date_gettime',
+ 'date_getyear',
+ 'date_gmttolocal',
+ 'date_localtogmt',
+ 'date_maximum',
+ 'date_minimum',
+ 'date_msec',
+ 'date_setformat',
+ 'date_subtract',
+ 'db_layoutnameitem',
+ 'db_layoutnames',
+ 'db_nameitem',
+ 'db_names',
+ 'db_tablenameitem',
+ 'db_tablenames',
+ 'dbi_column_names',
+ 'dbi_field_names',
+ 'decimal',
+ 'decimal_setglobaldefaultprecision',
+ 'decode_base64',
+ 'decode_bheader',
+ 'decode_hex',
+ 'decode_html',
+ 'decode_json',
+ 'decode_qheader',
+ 'decode_quotedprintable',
+ 'decode_quotedprintablebytes',
+ 'decode_url',
+ 'decode_xml',
+ 'decompress',
+ 'decrypt_blowfish',
+ 'decrypt_blowfish2',
+ 'default',
+ 'define_atbegin',
+ 'define_atend',
+ 'define_constant',
+ 'define_prototype',
+ 'define_tag',
+ 'define_tagp',
+ 'define_type',
+ 'define_typep',
+ 'deserialize',
+ 'directory_directorynameitem',
+ 'directory_lister',
+ 'directory_nameitem',
+ 'directorynameitem',
+ 'dns_default',
+ 'dns_lookup',
+ 'dns_response',
+ 'duration',
+ 'else',
+ 'email_batch',
+ 'email_compose',
+ 'email_digestchallenge',
+ 'email_digestresponse',
+ 'email_extract',
+ 'email_findemails',
+ 'email_immediate',
+ 'email_merge',
+ 'email_mxerror',
+ 'email_mxlookup',
+ 'email_parse',
+ 'email_pop',
+ 'email_queue',
+ 'email_result',
+ 'email_safeemail',
+ 'email_send',
+ 'email_smtp',
+ 'email_status',
+ 'email_token',
+ 'email_translatebreakstocrlf',
+ 'encode_base64',
+ 'encode_bheader',
+ 'encode_break',
+ 'encode_breaks',
+ 'encode_crc32',
+ 'encode_hex',
+ 'encode_html',
+ 'encode_htmltoxml',
+ 'encode_json',
+ 'encode_qheader',
+ 'encode_quotedprintable',
+ 'encode_quotedprintablebytes',
+ 'encode_set',
+ 'encode_smart',
+ 'encode_sql',
+ 'encode_sql92',
+ 'encode_stricturl',
+ 'encode_url',
+ 'encode_xml',
+ 'encrypt_blowfish',
+ 'encrypt_blowfish2',
+ 'encrypt_crammd5',
+ 'encrypt_hmac',
+ 'encrypt_md5',
+ 'eq',
+ 'error_adderror',
+ 'error_code',
+ 'error_code_aborted',
+ 'error_code_assert',
+ 'error_code_bof',
+ 'error_code_connectioninvalid',
+ 'error_code_couldnotclosefile',
+ 'error_code_couldnotcreateoropenfile',
+ 'error_code_couldnotdeletefile',
+ 'error_code_couldnotdisposememory',
+ 'error_code_couldnotlockmemory',
+ 'error_code_couldnotreadfromfile',
+ 'error_code_couldnotunlockmemory',
+ 'error_code_couldnotwritetofile',
+ 'error_code_criterianotmet',
+ 'error_code_datasourceerror',
+ 'error_code_directoryfull',
+ 'error_code_diskfull',
+ 'error_code_dividebyzero',
+ 'error_code_eof',
+ 'error_code_failure',
+ 'error_code_fieldrestriction',
+ 'error_code_file',
+ 'error_code_filealreadyexists',
+ 'error_code_filecorrupt',
+ 'error_code_fileinvalid',
+ 'error_code_fileinvalidaccessmode',
+ 'error_code_fileisclosed',
+ 'error_code_fileisopen',
+ 'error_code_filelocked',
+ 'error_code_filenotfound',
+ 'error_code_fileunlocked',
+ 'error_code_httpfilenotfound',
+ 'error_code_illegalinstruction',
+ 'error_code_illegaluseoffrozeninstance',
+ 'error_code_invaliddatabase',
+ 'error_code_invalidfilename',
+ 'error_code_invalidmemoryobject',
+ 'error_code_invalidparameter',
+ 'error_code_invalidpassword',
+ 'error_code_invalidpathname',
+ 'error_code_invalidusername',
+ 'error_code_ioerror',
+ 'error_code_loopaborted',
+ 'error_code_memory',
+ 'error_code_network',
+ 'error_code_nilpointer',
+ 'error_code_noerr',
+ 'error_code_nopermission',
+ 'error_code_outofmemory',
+ 'error_code_outofstackspace',
+ 'error_code_overflow',
+ 'error_code_postconditionfailed',
+ 'error_code_preconditionfailed',
+ 'error_code_resnotfound',
+ 'error_code_resource',
+ 'error_code_streamreaderror',
+ 'error_code_streamwriteerror',
+ 'error_code_syntaxerror',
+ 'error_code_tagnotfound',
+ 'error_code_unknownerror',
+ 'error_code_varnotfound',
+ 'error_code_volumedoesnotexist',
+ 'error_code_webactionnotsupported',
+ 'error_code_webadderror',
+ 'error_code_webdeleteerror',
+ 'error_code_webmodulenotfound',
+ 'error_code_webnosuchobject',
+ 'error_code_webrepeatingrelatedfield',
+ 'error_code_webrequiredfieldmissing',
+ 'error_code_webtimeout',
+ 'error_code_webupdateerror',
+ 'error_columnrestriction',
+ 'error_currenterror',
+ 'error_databaseconnectionunavailable',
+ 'error_databasetimeout',
+ 'error_deleteerror',
+ 'error_fieldrestriction',
+ 'error_filenotfound',
+ 'error_invaliddatabase',
+ 'error_invalidpassword',
+ 'error_invalidusername',
+ 'error_modulenotfound',
+ 'error_msg',
+ 'error_msg_aborted',
+ 'error_msg_assert',
+ 'error_msg_bof',
+ 'error_msg_connectioninvalid',
+ 'error_msg_couldnotclosefile',
+ 'error_msg_couldnotcreateoropenfile',
+ 'error_msg_couldnotdeletefile',
+ 'error_msg_couldnotdisposememory',
+ 'error_msg_couldnotlockmemory',
+ 'error_msg_couldnotreadfromfile',
+ 'error_msg_couldnotunlockmemory',
+ 'error_msg_couldnotwritetofile',
+ 'error_msg_criterianotmet',
+ 'error_msg_datasourceerror',
+ 'error_msg_directoryfull',
+ 'error_msg_diskfull',
+ 'error_msg_dividebyzero',
+ 'error_msg_eof',
+ 'error_msg_failure',
+ 'error_msg_fieldrestriction',
+ 'error_msg_file',
+ 'error_msg_filealreadyexists',
+ 'error_msg_filecorrupt',
+ 'error_msg_fileinvalid',
+ 'error_msg_fileinvalidaccessmode',
+ 'error_msg_fileisclosed',
+ 'error_msg_fileisopen',
+ 'error_msg_filelocked',
+ 'error_msg_filenotfound',
+ 'error_msg_fileunlocked',
+ 'error_msg_httpfilenotfound',
+ 'error_msg_illegalinstruction',
+ 'error_msg_illegaluseoffrozeninstance',
+ 'error_msg_invaliddatabase',
+ 'error_msg_invalidfilename',
+ 'error_msg_invalidmemoryobject',
+ 'error_msg_invalidparameter',
+ 'error_msg_invalidpassword',
+ 'error_msg_invalidpathname',
+ 'error_msg_invalidusername',
+ 'error_msg_ioerror',
+ 'error_msg_loopaborted',
+ 'error_msg_memory',
+ 'error_msg_network',
+ 'error_msg_nilpointer',
+ 'error_msg_noerr',
+ 'error_msg_nopermission',
+ 'error_msg_outofmemory',
+ 'error_msg_outofstackspace',
+ 'error_msg_overflow',
+ 'error_msg_postconditionfailed',
+ 'error_msg_preconditionfailed',
+ 'error_msg_resnotfound',
+ 'error_msg_resource',
+ 'error_msg_streamreaderror',
+ 'error_msg_streamwriteerror',
+ 'error_msg_syntaxerror',
+ 'error_msg_tagnotfound',
+ 'error_msg_unknownerror',
+ 'error_msg_varnotfound',
+ 'error_msg_volumedoesnotexist',
+ 'error_msg_webactionnotsupported',
+ 'error_msg_webadderror',
+ 'error_msg_webdeleteerror',
+ 'error_msg_webmodulenotfound',
+ 'error_msg_webnosuchobject',
+ 'error_msg_webrepeatingrelatedfield',
+ 'error_msg_webrequiredfieldmissing',
+ 'error_msg_webtimeout',
+ 'error_msg_webupdateerror',
+ 'error_noerror',
+ 'error_nopermission',
+ 'error_norecordsfound',
+ 'error_outofmemory',
+ 'error_pop',
+ 'error_push',
+ 'error_reqcolumnmissing',
+ 'error_reqfieldmissing',
+ 'error_requiredcolumnmissing',
+ 'error_requiredfieldmissing',
+ 'error_reset',
+ 'error_seterrorcode',
+ 'error_seterrormessage',
+ 'error_updateerror',
+ 'euro',
+ 'event_schedule',
+ 'ew',
+ 'fail',
+ 'fail_if',
+ 'false',
+ 'field',
+ 'field_name',
+ 'field_names',
+ 'file',
+ 'file_autoresolvefullpaths',
+ 'file_chmod',
+ 'file_control',
+ 'file_copy',
+ 'file_create',
+ 'file_creationdate',
+ 'file_currenterror',
+ 'file_delete',
+ 'file_exists',
+ 'file_getlinecount',
+ 'file_getsize',
+ 'file_isdirectory',
+ 'file_listdirectory',
+ 'file_moddate',
+ 'file_modechar',
+ 'file_modeline',
+ 'file_move',
+ 'file_openread',
+ 'file_openreadwrite',
+ 'file_openwrite',
+ 'file_openwriteappend',
+ 'file_openwritetruncate',
+ 'file_probeeol',
+ 'file_processuploads',
+ 'file_read',
+ 'file_readline',
+ 'file_rename',
+ 'file_serve',
+ 'file_setsize',
+ 'file_stream',
+ 'file_streamcopy',
+ 'file_uploads',
+ 'file_waitread',
+ 'file_waittimeout',
+ 'file_waitwrite',
+ 'file_write',
+ 'find_soap_ops',
+ 'form_param',
+ 'found_count',
+ 'ft',
+ 'ftp_getfile',
+ 'ftp_getlisting',
+ 'ftp_putfile',
+ 'full',
+ 'global',
+ 'global_defined',
+ 'global_remove',
+ 'global_reset',
+ 'globals',
+ 'gt',
+ 'gte',
+ 'handle',
+ 'handle_error',
+ 'header',
+ 'html_comment',
+ 'http_getfile',
+ 'ical_alarm',
+ 'ical_attribute',
+ 'ical_calendar',
+ 'ical_daylight',
+ 'ical_event',
+ 'ical_freebusy',
+ 'ical_item',
+ 'ical_journal',
+ 'ical_parse',
+ 'ical_standard',
+ 'ical_timezone',
+ 'ical_todo',
+ 'if',
+ 'if_empty',
+ 'if_false',
+ 'if_null',
+ 'if_true',
+ 'image',
+ 'image_url',
+ 'img',
+ 'include',
+ 'include_cgi',
+ 'include_currentpath',
+ 'include_once',
+ 'include_raw',
+ 'include_url',
+ 'inline',
+ 'integer',
+ 'iterate',
+ 'iterator',
+ 'java',
+ 'java_bean',
+ 'json_records',
+ 'json_rpccall',
+ 'keycolumn_name',
+ 'keycolumn_value',
+ 'keyfield_name',
+ 'keyfield_value',
+ 'lasso_comment',
+ 'lasso_currentaction',
+ 'lasso_datasourceis',
+ 'lasso_datasourceis4d',
+ 'lasso_datasourceisfilemaker',
+ 'lasso_datasourceisfilemaker7',
+ 'lasso_datasourceisfilemaker9',
+ 'lasso_datasourceisfilemakersa',
+ 'lasso_datasourceisjdbc',
+ 'lasso_datasourceislassomysql',
+ 'lasso_datasourceismysql',
+ 'lasso_datasourceisodbc',
+ 'lasso_datasourceisopenbase',
+ 'lasso_datasourceisoracle',
+ 'lasso_datasourceispostgresql',
+ 'lasso_datasourceisspotlight',
+ 'lasso_datasourceissqlite',
+ 'lasso_datasourceissqlserver',
+ 'lasso_datasourcemodulename',
+ 'lasso_datatype',
+ 'lasso_disableondemand',
+ 'lasso_errorreporting',
+ 'lasso_executiontimelimit',
+ 'lasso_parser',
+ 'lasso_process',
+ 'lasso_sessionid',
+ 'lasso_siteid',
+ 'lasso_siteisrunning',
+ 'lasso_sitename',
+ 'lasso_siterestart',
+ 'lasso_sitestart',
+ 'lasso_sitestop',
+ 'lasso_tagexists',
+ 'lasso_tagmodulename',
+ 'lasso_uniqueid',
+ 'lasso_updatecheck',
+ 'lasso_uptime',
+ 'lasso_version',
+ 'lassoapp_create',
+ 'lassoapp_dump',
+ 'lassoapp_flattendir',
+ 'lassoapp_getappdata',
+ 'lassoapp_link',
+ 'lassoapp_list',
+ 'lassoapp_process',
+ 'lassoapp_unitize',
+ 'layout_name',
+ 'ldap',
+ 'ldap_scope_base',
+ 'ldap_scope_onelevel',
+ 'ldap_scope_subtree',
+ 'ldml',
+ 'ldml_ldml',
+ 'library',
+ 'library_once',
+ 'link',
+ 'link_currentaction',
+ 'link_currentactionparams',
+ 'link_currentactionurl',
+ 'link_currentgroup',
+ 'link_currentgroupparams',
+ 'link_currentgroupurl',
+ 'link_currentrecord',
+ 'link_currentrecordparams',
+ 'link_currentrecordurl',
+ 'link_currentsearch',
+ 'link_currentsearchparams',
+ 'link_currentsearchurl',
+ 'link_detail',
+ 'link_detailparams',
+ 'link_detailurl',
+ 'link_firstgroup',
+ 'link_firstgroupparams',
+ 'link_firstgroupurl',
+ 'link_firstrecord',
+ 'link_firstrecordparams',
+ 'link_firstrecordurl',
+ 'link_lastgroup',
+ 'link_lastgroupparams',
+ 'link_lastgroupurl',
+ 'link_lastrecord',
+ 'link_lastrecordparams',
+ 'link_lastrecordurl',
+ 'link_nextgroup',
+ 'link_nextgroupparams',
+ 'link_nextgroupurl',
+ 'link_nextrecord',
+ 'link_nextrecordparams',
+ 'link_nextrecordurl',
+ 'link_params',
+ 'link_prevgroup',
+ 'link_prevgroupparams',
+ 'link_prevgroupurl',
+ 'link_prevrecord',
+ 'link_prevrecordparams',
+ 'link_prevrecordurl',
+ 'link_setformat',
+ 'link_url',
+ 'list',
+ 'list_additem',
+ 'list_fromlist',
+ 'list_fromstring',
+ 'list_getitem',
+ 'list_itemcount',
+ 'list_iterator',
+ 'list_removeitem',
+ 'list_replaceitem',
+ 'list_reverseiterator',
+ 'list_tostring',
+ 'literal',
+ 'ljax_end',
+ 'ljax_hastarget',
+ 'ljax_include',
+ 'ljax_start',
+ 'ljax_target',
+ 'local',
+ 'local_defined',
+ 'local_remove',
+ 'local_reset',
+ 'locale_format',
+ 'locals',
+ 'log',
+ 'log_always',
+ 'log_critical',
+ 'log_deprecated',
+ 'log_destination_console',
+ 'log_destination_database',
+ 'log_destination_file',
+ 'log_detail',
+ 'log_level_critical',
+ 'log_level_deprecated',
+ 'log_level_detail',
+ 'log_level_sql',
+ 'log_level_warning',
+ 'log_setdestination',
+ 'log_sql',
+ 'log_warning',
+ 'logicalop_value',
+ 'logicaloperator_value',
+ 'loop',
+ 'loop_abort',
+ 'loop_continue',
+ 'loop_count',
+ 'lt',
+ 'lte',
+ 'magick_image',
+ 'map',
+ 'map_iterator',
+ 'match_comparator',
+ 'match_notrange',
+ 'match_notregexp',
+ 'match_range',
+ 'match_regexp',
+ 'math_abs',
+ 'math_acos',
+ 'math_add',
+ 'math_asin',
+ 'math_atan',
+ 'math_atan2',
+ 'math_ceil',
+ 'math_converteuro',
+ 'math_cos',
+ 'math_div',
+ 'math_exp',
+ 'math_floor',
+ 'math_internal_rand',
+ 'math_internal_randmax',
+ 'math_internal_srand',
+ 'math_ln',
+ 'math_log',
+ 'math_log10',
+ 'math_max',
+ 'math_min',
+ 'math_mod',
+ 'math_mult',
+ 'math_pow',
+ 'math_random',
+ 'math_range',
+ 'math_rint',
+ 'math_roman',
+ 'math_round',
+ 'math_sin',
+ 'math_sqrt',
+ 'math_sub',
+ 'math_tan',
+ 'maxrecords_value',
+ 'memory_session_driver',
+ 'mime_type',
+ 'minimal',
+ 'misc__srand',
+ 'misc_randomnumber',
+ 'misc_roman',
+ 'misc_valid_creditcard',
+ 'mysql_session_driver',
+ 'named_param',
+ 'namespace_current',
+ 'namespace_delimiter',
+ 'namespace_exists',
+ 'namespace_file_fullpathexists',
+ 'namespace_global',
+ 'namespace_import',
+ 'namespace_load',
+ 'namespace_page',
+ 'namespace_unload',
+ 'namespace_using',
+ 'neq',
+ 'net',
+ 'net_connectinprogress',
+ 'net_connectok',
+ 'net_typessl',
+ 'net_typessltcp',
+ 'net_typessludp',
+ 'net_typetcp',
+ 'net_typeudp',
+ 'net_waitread',
+ 'net_waittimeout',
+ 'net_waitwrite',
+ 'no_default_output',
+ 'none',
+ 'noprocess',
+ 'not',
+ 'nrx',
+ 'nslookup',
+ 'null',
+ 'object',
+ 'once',
+ 'oneoff',
+ 'op_logicalvalue',
+ 'operator_logicalvalue',
+ 'option',
+ 'or',
+ 'os_process',
+ 'output',
+ 'output_none',
+ 'pair',
+ 'params_up',
+ 'pdf_barcode',
+ 'pdf_color',
+ 'pdf_doc',
+ 'pdf_font',
+ 'pdf_image',
+ 'pdf_list',
+ 'pdf_read',
+ 'pdf_serve',
+ 'pdf_table',
+ 'pdf_text',
+ 'percent',
+ 'portal',
+ 'postcondition',
+ 'precondition',
+ 'prettyprintingnsmap',
+ 'prettyprintingtypemap',
+ 'priorityqueue',
+ 'private',
+ 'proc_convert',
+ 'proc_convertbody',
+ 'proc_convertone',
+ 'proc_extract',
+ 'proc_extractone',
+ 'proc_find',
+ 'proc_first',
+ 'proc_foreach',
+ 'proc_get',
+ 'proc_join',
+ 'proc_lasso',
+ 'proc_last',
+ 'proc_map_entry',
+ 'proc_null',
+ 'proc_regexp',
+ 'proc_xml',
+ 'proc_xslt',
+ 'process',
+ 'protect',
+ 'queue',
+ 'rand',
+ 'randomnumber',
+ 'raw',
+ 'recid_value',
+ 'record_count',
+ 'recordcount',
+ 'recordid_value',
+ 'records',
+ 'records_array',
+ 'records_map',
+ 'redirect_url',
+ 'reference',
+ 'referer',
+ 'referer_url',
+ 'referrer',
+ 'referrer_url',
+ 'regexp',
+ 'repeating',
+ 'repeating_valueitem',
+ 'repeatingvalueitem',
+ 'repetition',
+ 'req_column',
+ 'req_field',
+ 'required_column',
+ 'required_field',
+ 'response_fileexists',
+ 'response_filepath',
+ 'response_localpath',
+ 'response_path',
+ 'response_realm',
+ 'resultset',
+ 'resultset_count',
+ 'return',
+ 'return_value',
+ 'reverseiterator',
+ 'roman',
+ 'row_count',
+ 'rows',
+ 'rows_array',
+ 'run_children',
+ 'rx',
+ 'schema_name',
+ 'scientific',
+ 'search_args',
+ 'search_arguments',
+ 'search_columnitem',
+ 'search_fielditem',
+ 'search_operatoritem',
+ 'search_opitem',
+ 'search_valueitem',
+ 'searchfielditem',
+ 'searchoperatoritem',
+ 'searchopitem',
+ 'searchvalueitem',
+ 'select',
+ 'selected',
+ 'self',
+ 'serialize',
+ 'series',
+ 'server_date',
+ 'server_day',
+ 'server_ip',
+ 'server_name',
+ 'server_port',
+ 'server_push',
+ 'server_siteisrunning',
+ 'server_sitestart',
+ 'server_sitestop',
+ 'server_time',
+ 'session_abort',
+ 'session_addoutputfilter',
+ 'session_addvar',
+ 'session_addvariable',
+ 'session_deleteexpired',
+ 'session_driver',
+ 'session_end',
+ 'session_id',
+ 'session_removevar',
+ 'session_removevariable',
+ 'session_result',
+ 'session_setdriver',
+ 'session_start',
+ 'set',
+ 'set_iterator',
+ 'set_reverseiterator',
+ 'shown_count',
+ 'shown_first',
+ 'shown_last',
+ 'site_atbegin',
+ 'site_id',
+ 'site_name',
+ 'site_restart',
+ 'skiprecords_value',
+ 'sleep',
+ 'soap_convertpartstopairs',
+ 'soap_definetag',
+ 'soap_info',
+ 'soap_lastrequest',
+ 'soap_lastresponse',
+ 'soap_stub',
+ 'sort_args',
+ 'sort_arguments',
+ 'sort_columnitem',
+ 'sort_fielditem',
+ 'sort_orderitem',
+ 'sortcolumnitem',
+ 'sortfielditem',
+ 'sortorderitem',
+ 'sqlite_createdb',
+ 'sqlite_session_driver',
+ 'sqlite_setsleepmillis',
+ 'sqlite_setsleeptries',
+ 'srand',
+ 'stack',
+ 'stock_quote',
+ 'string',
+ 'string_charfromname',
+ 'string_concatenate',
+ 'string_countfields',
+ 'string_endswith',
+ 'string_extract',
+ 'string_findposition',
+ 'string_findregexp',
+ 'string_fordigit',
+ 'string_getfield',
+ 'string_getunicodeversion',
+ 'string_insert',
+ 'string_isalpha',
+ 'string_isalphanumeric',
+ 'string_isdigit',
+ 'string_ishexdigit',
+ 'string_islower',
+ 'string_isnumeric',
+ 'string_ispunctuation',
+ 'string_isspace',
+ 'string_isupper',
+ 'string_length',
+ 'string_lowercase',
+ 'string_remove',
+ 'string_removeleading',
+ 'string_removetrailing',
+ 'string_replace',
+ 'string_replaceregexp',
+ 'string_todecimal',
+ 'string_tointeger',
+ 'string_uppercase',
+ 'string_validcharset',
+ 'table_name',
+ 'table_realname',
+ 'tag',
+ 'tag_name',
+ 'tags',
+ 'tags_find',
+ 'tags_list',
+ 'tcp_close',
+ 'tcp_open',
+ 'tcp_send',
+ 'tcp_tcp_close',
+ 'tcp_tcp_open',
+ 'tcp_tcp_send',
+ 'thread_abort',
+ 'thread_atomic',
+ 'thread_event',
+ 'thread_exists',
+ 'thread_getcurrentid',
+ 'thread_getpriority',
+ 'thread_info',
+ 'thread_list',
+ 'thread_lock',
+ 'thread_pipe',
+ 'thread_priority_default',
+ 'thread_priority_high',
+ 'thread_priority_low',
+ 'thread_rwlock',
+ 'thread_semaphore',
+ 'thread_setpriority',
+ 'token_value',
+ 'total_records',
+ 'treemap',
+ 'treemap_iterator',
+ 'true',
+ 'url_rewrite',
+ 'valid_creditcard',
+ 'valid_date',
+ 'valid_email',
+ 'valid_url',
+ 'value_list',
+ 'value_listitem',
+ 'valuelistitem',
+ 'var',
+ 'var_defined',
+ 'var_remove',
+ 'var_reset',
+ 'var_set',
+ 'variable',
+ 'variable_defined',
+ 'variable_set',
+ 'variables',
+ 'variant_count',
+ 'vars',
+ 'wap_isenabled',
+ 'wap_maxbuttons',
+ 'wap_maxcolumns',
+ 'wap_maxhorzpixels',
+ 'wap_maxrows',
+ 'wap_maxvertpixels',
+ 'while',
+ 'wsdl_extract',
+ 'wsdl_getbinding',
+ 'wsdl_getbindingforoperation',
+ 'wsdl_getbindingoperations',
+ 'wsdl_getmessagenamed',
+ 'wsdl_getmessageparts',
+ 'wsdl_getmessagetriofromporttype',
+ 'wsdl_getopbodystyle',
+ 'wsdl_getopbodyuse',
+ 'wsdl_getoperation',
+ 'wsdl_getoplocation',
+ 'wsdl_getopmessagetypes',
+ 'wsdl_getopsoapaction',
+ 'wsdl_getportaddress',
+ 'wsdl_getportsforservice',
+ 'wsdl_getporttype',
+ 'wsdl_getporttypeoperation',
+ 'wsdl_getservicedocumentation',
+ 'wsdl_getservices',
+ 'wsdl_gettargetnamespace',
+ 'wsdl_issoapoperation',
+ 'wsdl_listoperations',
+ 'wsdl_maketest',
+ 'xml',
+ 'xml_extract',
+ 'xml_rpc',
+ 'xml_rpccall',
+ 'xml_rw',
+ 'xml_serve',
+ 'xml_transform',
+ 'xml_xml',
+ 'xml_xmlstream',
+ 'xmlstream',
+ 'xsd_attribute',
+ 'xsd_blankarraybase',
+ 'xsd_blankbase',
+ 'xsd_buildtype',
+ 'xsd_cache',
+ 'xsd_checkcardinality',
+ 'xsd_continueall',
+ 'xsd_continueannotation',
+ 'xsd_continueany',
+ 'xsd_continueanyattribute',
+ 'xsd_continueattribute',
+ 'xsd_continueattributegroup',
+ 'xsd_continuechoice',
+ 'xsd_continuecomplexcontent',
+ 'xsd_continuecomplextype',
+ 'xsd_continuedocumentation',
+ 'xsd_continueextension',
+ 'xsd_continuegroup',
+ 'xsd_continuekey',
+ 'xsd_continuelist',
+ 'xsd_continuerestriction',
+ 'xsd_continuesequence',
+ 'xsd_continuesimplecontent',
+ 'xsd_continuesimpletype',
+ 'xsd_continueunion',
+ 'xsd_deserialize',
+ 'xsd_fullyqualifyname',
+ 'xsd_generate',
+ 'xsd_generateblankfromtype',
+ 'xsd_generateblanksimpletype',
+ 'xsd_generatetype',
+ 'xsd_getschematype',
+ 'xsd_issimpletype',
+ 'xsd_loadschema',
+ 'xsd_lookupnamespaceuri',
+ 'xsd_lookuptype',
+ 'xsd_processany',
+ 'xsd_processattribute',
+ 'xsd_processattributegroup',
+ 'xsd_processcomplextype',
+ 'xsd_processelement',
+ 'xsd_processgroup',
+ 'xsd_processimport',
+ 'xsd_processinclude',
+ 'xsd_processschema',
+ 'xsd_processsimpletype',
+ 'xsd_ref',
+ 'xsd_type',
+ )
+}
+MEMBERS = {
+ 'Member Methods': (
+ 'abort',
+ 'abs',
+ 'accept_charset',
+ 'accept',
+ 'acceptconnections',
+ 'acceptdeserializedelement',
+ 'acceptnossl',
+ 'acceptpost',
+ 'accesskey',
+ 'acos',
+ 'acosh',
+ 'action',
+ 'actionparams',
+ 'active_tick',
+ 'add',
+ 'addatend',
+ 'addattachment',
+ 'addbarcode',
+ 'addchapter',
+ 'addcheckbox',
+ 'addcolumninfo',
+ 'addcombobox',
+ 'addcomment',
+ 'addcomponent',
+ 'addcomponents',
+ 'addcss',
+ 'adddatabasetable',
+ 'adddatasource',
+ 'adddatasourcedatabase',
+ 'adddatasourcehost',
+ 'adddir',
+ 'adddirpath',
+ 'addendjs',
+ 'addendjstext',
+ 'adderror',
+ 'addfavicon',
+ 'addfile',
+ 'addgroup',
+ 'addheader',
+ 'addhiddenfield',
+ 'addhtmlpart',
+ 'addimage',
+ 'addjavascript',
+ 'addjs',
+ 'addjstext',
+ 'addlist',
+ 'addmathfunctions',
+ 'addmember',
+ 'addoneheaderline',
+ 'addpage',
+ 'addparagraph',
+ 'addpart',
+ 'addpasswordfield',
+ 'addphrase',
+ 'addpostdispatch',
+ 'addpredispatch',
+ 'addradiobutton',
+ 'addradiogroup',
+ 'addresetbutton',
+ 'addrow',
+ 'addsection',
+ 'addselectlist',
+ 'addset',
+ 'addsubmitbutton',
+ 'addsubnode',
+ 'addtable',
+ 'addtask',
+ 'addtext',
+ 'addtextarea',
+ 'addtextfield',
+ 'addtextpart',
+ 'addtobuffer',
+ 'addtrait',
+ 'adduser',
+ 'addusertogroup',
+ 'addwarning',
+ 'addzip',
+ 'allocobject',
+ 'am',
+ 'ampm',
+ 'annotate',
+ 'answer',
+ 'apop',
+ 'append',
+ 'appendarray',
+ 'appendarraybegin',
+ 'appendarrayend',
+ 'appendbool',
+ 'appendbytes',
+ 'appendchar',
+ 'appendchild',
+ 'appendcolon',
+ 'appendcomma',
+ 'appenddata',
+ 'appenddatetime',
+ 'appenddbpointer',
+ 'appenddecimal',
+ 'appenddocument',
+ 'appendimagetolist',
+ 'appendinteger',
+ 'appendnowutc',
+ 'appendnull',
+ 'appendoid',
+ 'appendregex',
+ 'appendreplacement',
+ 'appendstring',
+ 'appendtail',
+ 'appendtime',
+ 'applyheatcolors',
+ 'appmessage',
+ 'appname',
+ 'appprefix',
+ 'appstatus',
+ 'arc',
+ 'archive',
+ 'arguments',
+ 'argumentvalue',
+ 'asarray',
+ 'asarraystring',
+ 'asasync',
+ 'asbytes',
+ 'ascopy',
+ 'ascopydeep',
+ 'asdecimal',
+ 'asgenerator',
+ 'asin',
+ 'asinh',
+ 'asinteger',
+ 'askeyedgenerator',
+ 'aslazystring',
+ 'aslist',
+ 'asraw',
+ 'asstaticarray',
+ 'asstring',
+ 'asstringhex',
+ 'asstringoct',
+ 'asxml',
+ 'atan',
+ 'atan2',
+ 'atanh',
+ 'atend',
+ 'atends',
+ 'atime',
+ 'attributecount',
+ 'attributes',
+ 'attrs',
+ 'auth',
+ 'authenticate',
+ 'authorize',
+ 'autocollectbuffer',
+ 'average',
+ 'back',
+ 'basename',
+ 'basepaths',
+ 'baseuri',
+ 'bcc',
+ 'beginssl',
+ 'beginswith',
+ 'begintls',
+ 'bestcharset',
+ 'bind_blob',
+ 'bind_double',
+ 'bind_int',
+ 'bind_null',
+ 'bind_parameter_index',
+ 'bind_text',
+ 'bind',
+ 'bindcount',
+ 'bindone',
+ 'bindparam',
+ 'bitand',
+ 'bitclear',
+ 'bitflip',
+ 'bitformat',
+ 'bitnot',
+ 'bitor',
+ 'bitset',
+ 'bitshiftleft',
+ 'bitshiftright',
+ 'bittest',
+ 'bitxor',
+ 'blur',
+ 'body',
+ 'bodybytes',
+ 'boundary',
+ 'bptoxml',
+ 'bptypetostr',
+ 'bucketnumber',
+ 'buff',
+ 'buildquery',
+ 'businessdaysbetween',
+ 'by',
+ 'bytes',
+ 'cachedappprefix',
+ 'cachedroot',
+ 'callboolean',
+ 'callbooleanmethod',
+ 'callbytemethod',
+ 'callcharmethod',
+ 'calldoublemethod',
+ 'calledname',
+ 'callfirst',
+ 'callfloat',
+ 'callfloatmethod',
+ 'callint',
+ 'callintmethod',
+ 'calllongmethod',
+ 'callnonvirtualbooleanmethod',
+ 'callnonvirtualbytemethod',
+ 'callnonvirtualcharmethod',
+ 'callnonvirtualdoublemethod',
+ 'callnonvirtualfloatmethod',
+ 'callnonvirtualintmethod',
+ 'callnonvirtuallongmethod',
+ 'callnonvirtualobjectmethod',
+ 'callnonvirtualshortmethod',
+ 'callnonvirtualvoidmethod',
+ 'callobject',
+ 'callobjectmethod',
+ 'callshortmethod',
+ 'callsite_col',
+ 'callsite_file',
+ 'callsite_line',
+ 'callstack',
+ 'callstaticboolean',
+ 'callstaticbooleanmethod',
+ 'callstaticbytemethod',
+ 'callstaticcharmethod',
+ 'callstaticdoublemethod',
+ 'callstaticfloatmethod',
+ 'callstaticint',
+ 'callstaticintmethod',
+ 'callstaticlongmethod',
+ 'callstaticobject',
+ 'callstaticobjectmethod',
+ 'callstaticshortmethod',
+ 'callstaticstring',
+ 'callstaticvoidmethod',
+ 'callstring',
+ 'callvoid',
+ 'callvoidmethod',
+ 'cancel',
+ 'cap',
+ 'capa',
+ 'capabilities',
+ 'capi',
+ 'cbrt',
+ 'cc',
+ 'ceil',
+ 'chardigitvalue',
+ 'charname',
+ 'charset',
+ 'chartype',
+ 'checkdebugging',
+ 'checked',
+ 'checkuser',
+ 'childnodes',
+ 'chk',
+ 'chmod',
+ 'choosecolumntype',
+ 'chown',
+ 'chunked',
+ 'circle',
+ 'class',
+ 'classid',
+ 'clear',
+ 'clonenode',
+ 'close',
+ 'closepath',
+ 'closeprepared',
+ 'closewrite',
+ 'code',
+ 'codebase',
+ 'codetype',
+ 'colmap',
+ 'colorspace',
+ 'column_blob',
+ 'column_count',
+ 'column_decltype',
+ 'column_double',
+ 'column_int64',
+ 'column_name',
+ 'column_text',
+ 'column_type',
+ 'command',
+ 'comments',
+ 'compare',
+ 'comparecodepointorder',
+ 'componentdelimiter',
+ 'components',
+ 'composite',
+ 'compress',
+ 'concat',
+ 'condtoint',
+ 'configureds',
+ 'configuredskeys',
+ 'connect',
+ 'connection',
+ 'connectionhandler',
+ 'connhandler',
+ 'consume_domain',
+ 'consume_label',
+ 'consume_message',
+ 'consume_rdata',
+ 'consume_string',
+ 'contains',
+ 'content_disposition',
+ 'content_transfer_encoding',
+ 'content_type',
+ 'content',
+ 'contentlength',
+ 'contents',
+ 'contenttype',
+ 'continuation',
+ 'continuationpacket',
+ 'continuationpoint',
+ 'continuationstack',
+ 'continue',
+ 'contrast',
+ 'conventionaltop',
+ 'convert',
+ 'cookie',
+ 'cookies',
+ 'cookiesarray',
+ 'cookiesary',
+ 'copyto',
+ 'cos',
+ 'cosh',
+ 'count',
+ 'countkeys',
+ 'country',
+ 'countusersbygroup',
+ 'crc',
+ 'create',
+ 'createattribute',
+ 'createattributens',
+ 'createcdatasection',
+ 'createcomment',
+ 'createdocument',
+ 'createdocumentfragment',
+ 'createdocumenttype',
+ 'createelement',
+ 'createelementns',
+ 'createentityreference',
+ 'createindex',
+ 'createprocessinginstruction',
+ 'createtable',
+ 'createtextnode',
+ 'criteria',
+ 'crop',
+ 'csscontent',
+ 'curl',
+ 'current',
+ 'currentfile',
+ 'curveto',
+ 'd',
+ 'data',
+ 'databasecolumnnames',
+ 'databasecolumns',
+ 'databasemap',
+ 'databasename',
+ 'datasourcecolumnnames',
+ 'datasourcecolumns',
+ 'datasourcemap',
+ 'date',
+ 'day',
+ 'dayofmonth',
+ 'dayofweek',
+ 'dayofweekinmonth',
+ 'dayofyear',
+ 'days',
+ 'daysbetween',
+ 'db',
+ 'dbtablestable',
+ 'debug',
+ 'declare',
+ 'decodebase64',
+ 'decodehex',
+ 'decodehtml',
+ 'decodeqp',
+ 'decodeurl',
+ 'decodexml',
+ 'decompose',
+ 'decomposeassignment',
+ 'defaultcontentrepresentation',
+ 'defer',
+ 'deg2rad',
+ 'dele',
+ 'delete',
+ 'deletedata',
+ 'deleteglobalref',
+ 'deletelocalref',
+ 'delim',
+ 'depth',
+ 'dereferencepointer',
+ 'describe',
+ 'description',
+ 'deserialize',
+ 'detach',
+ 'detectcharset',
+ 'didinclude',
+ 'difference',
+ 'digit',
+ 'dir',
+ 'displaycountry',
+ 'displaylanguage',
+ 'displayname',
+ 'displayscript',
+ 'displayvariant',
+ 'div',
+ 'dns_response',
+ 'do',
+ 'doatbegins',
+ 'doatends',
+ 'doccomment',
+ 'doclose',
+ 'doctype',
+ 'document',
+ 'documentelement',
+ 'documentroot',
+ 'domainbody',
+ 'done',
+ 'dosessions',
+ 'dowithclose',
+ 'dowlocal',
+ 'download',
+ 'drawtext',
+ 'drop',
+ 'dropindex',
+ 'dsdbtable',
+ 'dshoststable',
+ 'dsinfo',
+ 'dst',
+ 'dstable',
+ 'dstoffset',
+ 'dtdid',
+ 'dup',
+ 'dup2',
+ 'each',
+ 'eachbyte',
+ 'eachcharacter',
+ 'eachchild',
+ 'eachcomponent',
+ 'eachdir',
+ 'eachdirpath',
+ 'eachdirpathrecursive',
+ 'eachentry',
+ 'eachfile',
+ 'eachfilename',
+ 'eachfilepath',
+ 'eachfilepathrecursive',
+ 'eachkey',
+ 'eachline',
+ 'eachlinebreak',
+ 'eachmatch',
+ 'eachnode',
+ 'eachpair',
+ 'eachpath',
+ 'eachpathrecursive',
+ 'eachrow',
+ 'eachsub',
+ 'eachword',
+ 'eachwordbreak',
+ 'element',
+ 'eligiblepath',
+ 'eligiblepaths',
+ 'encodebase64',
+ 'encodehex',
+ 'encodehtml',
+ 'encodehtmltoxml',
+ 'encodemd5',
+ 'encodepassword',
+ 'encodeqp',
+ 'encodesql',
+ 'encodesql92',
+ 'encodeurl',
+ 'encodevalue',
+ 'encodexml',
+ 'encoding',
+ 'enctype',
+ 'end',
+ 'endjs',
+ 'endssl',
+ 'endswith',
+ 'endtls',
+ 'enhance',
+ 'ensurestopped',
+ 'entities',
+ 'entry',
+ 'env',
+ 'equals',
+ 'era',
+ 'erf',
+ 'erfc',
+ 'err',
+ 'errcode',
+ 'errmsg',
+ 'error',
+ 'errors',
+ 'errstack',
+ 'escape_member',
+ 'establisherrorstate',
+ 'exceptioncheck',
+ 'exceptionclear',
+ 'exceptiondescribe',
+ 'exceptionoccurred',
+ 'exchange',
+ 'execinits',
+ 'execinstalls',
+ 'execute',
+ 'executelazy',
+ 'executenow',
+ 'exists',
+ 'exit',
+ 'exitcode',
+ 'exp',
+ 'expire',
+ 'expireminutes',
+ 'expiresminutes',
+ 'expm1',
+ 'export16bits',
+ 'export32bits',
+ 'export64bits',
+ 'export8bits',
+ 'exportas',
+ 'exportbytes',
+ 'exportfdf',
+ 'exportpointerbits',
+ 'exportsigned16bits',
+ 'exportsigned32bits',
+ 'exportsigned64bits',
+ 'exportsigned8bits',
+ 'exportstring',
+ 'expose',
+ 'extendedyear',
+ 'extensiondelimiter',
+ 'extensions',
+ 'extract',
+ 'extractfast',
+ 'extractfastone',
+ 'extractimage',
+ 'extractone',
+ 'f',
+ 'fabs',
+ 'fail',
+ 'failnoconnectionhandler',
+ 'family',
+ 'fatalerror',
+ 'fcgireq',
+ 'fchdir',
+ 'fchmod',
+ 'fchown',
+ 'fd',
+ 'features',
+ 'fetchdata',
+ 'fieldnames',
+ 'fieldposition',
+ 'fieldstable',
+ 'fieldtype',
+ 'fieldvalue',
+ 'file',
+ 'filename',
+ 'filenames',
+ 'filequeue',
+ 'fileuploads',
+ 'fileuploadsary',
+ 'filterinputcolumn',
+ 'finalize',
+ 'find',
+ 'findall',
+ 'findandmodify',
+ 'findbucket',
+ 'findcase',
+ 'findclass',
+ 'findcount',
+ 'finddescendant',
+ 'findfirst',
+ 'findinclude',
+ 'findinctx',
+ 'findindex',
+ 'findlast',
+ 'findpattern',
+ 'findposition',
+ 'findsymbols',
+ 'first',
+ 'firstchild',
+ 'firstcomponent',
+ 'firstdayofweek',
+ 'firstnode',
+ 'fixformat',
+ 'flags',
+ 'fliph',
+ 'flipv',
+ 'floor',
+ 'flush',
+ 'foldcase',
+ 'foo',
+ 'for',
+ 'forcedrowid',
+ 'foreach',
+ 'foreachaccept',
+ 'foreachbyte',
+ 'foreachcharacter',
+ 'foreachchild',
+ 'foreachday',
+ 'foreachentry',
+ 'foreachfile',
+ 'foreachfilename',
+ 'foreachkey',
+ 'foreachline',
+ 'foreachlinebreak',
+ 'foreachmatch',
+ 'foreachnode',
+ 'foreachpair',
+ 'foreachpathcomponent',
+ 'foreachrow',
+ 'foreachspool',
+ 'foreachsub',
+ 'foreachwordbreak',
+ 'form',
+ 'format',
+ 'formatas',
+ 'formatcontextelement',
+ 'formatcontextelements',
+ 'formatnumber',
+ 'free',
+ 'frexp',
+ 'from',
+ 'fromname',
+ 'fromport',
+ 'fromreflectedfield',
+ 'fromreflectedmethod',
+ 'front',
+ 'fsync',
+ 'ftpdeletefile',
+ 'ftpgetlisting',
+ 'ftruncate',
+ 'fullpath',
+ 'fx',
+ 'gamma',
+ 'gatewayinterface',
+ 'gen',
+ 'generatechecksum',
+ 'get',
+ 'getabswidth',
+ 'getalignment',
+ 'getappsource',
+ 'getarraylength',
+ 'getattr',
+ 'getattribute',
+ 'getattributenamespace',
+ 'getattributenode',
+ 'getattributenodens',
+ 'getattributens',
+ 'getbarheight',
+ 'getbarmultiplier',
+ 'getbarwidth',
+ 'getbaseline',
+ 'getbold',
+ 'getbooleanarrayelements',
+ 'getbooleanarrayregion',
+ 'getbooleanfield',
+ 'getbordercolor',
+ 'getborderwidth',
+ 'getbytearrayelements',
+ 'getbytearrayregion',
+ 'getbytefield',
+ 'getchararrayelements',
+ 'getchararrayregion',
+ 'getcharfield',
+ 'getclass',
+ 'getcode',
+ 'getcolor',
+ 'getcolumn',
+ 'getcolumncount',
+ 'getcolumns',
+ 'getdatabasebyalias',
+ 'getdatabasebyid',
+ 'getdatabasebyname',
+ 'getdatabasehost',
+ 'getdatabasetable',
+ 'getdatabasetablebyalias',
+ 'getdatabasetablebyid',
+ 'getdatabasetablepart',
+ 'getdatasource',
+ 'getdatasourcedatabase',
+ 'getdatasourcedatabasebyid',
+ 'getdatasourcehost',
+ 'getdatasourceid',
+ 'getdatasourcename',
+ 'getdefaultstorage',
+ 'getdoublearrayelements',
+ 'getdoublearrayregion',
+ 'getdoublefield',
+ 'getelementbyid',
+ 'getelementsbytagname',
+ 'getelementsbytagnamens',
+ 'getencoding',
+ 'getface',
+ 'getfield',
+ 'getfieldid',
+ 'getfile',
+ 'getfloatarrayelements',
+ 'getfloatarrayregion',
+ 'getfloatfield',
+ 'getfont',
+ 'getformat',
+ 'getfullfontname',
+ 'getgroup',
+ 'getgroupid',
+ 'getheader',
+ 'getheaders',
+ 'gethostdatabase',
+ 'gethtmlattr',
+ 'gethtmlattrstring',
+ 'getinclude',
+ 'getintarrayelements',
+ 'getintarrayregion',
+ 'getintfield',
+ 'getisocomment',
+ 'getitalic',
+ 'getlasterror',
+ 'getlcapitype',
+ 'getlibrary',
+ 'getlongarrayelements',
+ 'getlongarrayregion',
+ 'getlongfield',
+ 'getmargins',
+ 'getmethodid',
+ 'getmode',
+ 'getnameditem',
+ 'getnameditemns',
+ 'getnode',
+ 'getnumericvalue',
+ 'getobjectarrayelement',
+ 'getobjectclass',
+ 'getobjectfield',
+ 'getpadding',
+ 'getpagenumber',
+ 'getparts',
+ 'getprefs',
+ 'getpropertyvalue',
+ 'getprowcount',
+ 'getpsfontname',
+ 'getrange',
+ 'getrowcount',
+ 'getset',
+ 'getshortarrayelements',
+ 'getshortarrayregion',
+ 'getshortfield',
+ 'getsize',
+ 'getsortfieldspart',
+ 'getspacing',
+ 'getstaticbooleanfield',
+ 'getstaticbytefield',
+ 'getstaticcharfield',
+ 'getstaticdoublefield',
+ 'getstaticfieldid',
+ 'getstaticfloatfield',
+ 'getstaticintfield',
+ 'getstaticlongfield',
+ 'getstaticmethodid',
+ 'getstaticobjectfield',
+ 'getstaticshortfield',
+ 'getstatus',
+ 'getstringchars',
+ 'getstringlength',
+ 'getstyle',
+ 'getsupportedencodings',
+ 'gettablebyid',
+ 'gettext',
+ 'gettextalignment',
+ 'gettextsize',
+ 'gettrigger',
+ 'gettype',
+ 'getunderline',
+ 'getuniquealiasname',
+ 'getuser',
+ 'getuserbykey',
+ 'getuserid',
+ 'getversion',
+ 'getzipfilebytes',
+ 'givenblock',
+ 'gmt',
+ 'gotconnection',
+ 'gotfileupload',
+ 'groupby',
+ 'groupcolumns',
+ 'groupcount',
+ 'groupjoin',
+ 'handlebreakpointget',
+ 'handlebreakpointlist',
+ 'handlebreakpointremove',
+ 'handlebreakpointset',
+ 'handlebreakpointupdate',
+ 'handlecontextget',
+ 'handlecontextnames',
+ 'handlecontinuation',
+ 'handledefinitionbody',
+ 'handledefinitionhead',
+ 'handledefinitionresource',
+ 'handledevconnection',
+ 'handleevalexpired',
+ 'handlefeatureget',
+ 'handlefeatureset',
+ 'handlelassoappcontent',
+ 'handlelassoappresponse',
+ 'handlenested',
+ 'handlenormalconnection',
+ 'handlepop',
+ 'handleresource',
+ 'handlesource',
+ 'handlestackget',
+ 'handlestderr',
+ 'handlestdin',
+ 'handlestdout',
+ 'handshake',
+ 'hasattribute',
+ 'hasattributens',
+ 'hasattributes',
+ 'hasbinaryproperty',
+ 'haschildnodes',
+ 'hasexpired',
+ 'hasfeature',
+ 'hasfield',
+ 'hash',
+ 'hashtmlattr',
+ 'hasmethod',
+ 'hastable',
+ 'hastrailingcomponent',
+ 'hasvalue',
+ 'head',
+ 'header',
+ 'headerbytes',
+ 'headers',
+ 'headersarray',
+ 'headersmap',
+ 'height',
+ 'histogram',
+ 'home',
+ 'host',
+ 'hostcolumnnames',
+ 'hostcolumnnames2',
+ 'hostcolumns',
+ 'hostcolumns2',
+ 'hostdatasource',
+ 'hostextra',
+ 'hostid',
+ 'hostisdynamic',
+ 'hostmap',
+ 'hostmap2',
+ 'hostname',
+ 'hostpassword',
+ 'hostport',
+ 'hostschema',
+ 'hosttableencoding',
+ 'hosttonet16',
+ 'hosttonet32',
+ 'hosttonet64',
+ 'hostusername',
+ 'hour',
+ 'hourofampm',
+ 'hourofday',
+ 'hoursbetween',
+ 'href',
+ 'hreflang',
+ 'htmlcontent',
+ 'htmlizestacktrace',
+ 'htmlizestacktracelink',
+ 'httpaccept',
+ 'httpacceptencoding',
+ 'httpacceptlanguage',
+ 'httpauthorization',
+ 'httpcachecontrol',
+ 'httpconnection',
+ 'httpcookie',
+ 'httpequiv',
+ 'httphost',
+ 'httpreferer',
+ 'httpreferrer',
+ 'httpuseragent',
+ 'hypot',
+ 'id',
+ 'idealinmemory',
+ 'idle',
+ 'idmap',
+ 'ifempty',
+ 'ifkey',
+ 'ifnotempty',
+ 'ifnotkey',
+ 'ignorecase',
+ 'ilogb',
+ 'imgptr',
+ 'implementation',
+ 'import16bits',
+ 'import32bits',
+ 'import64bits',
+ 'import8bits',
+ 'importas',
+ 'importbytes',
+ 'importfdf',
+ 'importnode',
+ 'importpointer',
+ 'importstring',
+ 'in',
+ 'include',
+ 'includebytes',
+ 'includelibrary',
+ 'includelibraryonce',
+ 'includeonce',
+ 'includes',
+ 'includestack',
+ 'indaylighttime',
+ 'index',
+ 'init',
+ 'initialize',
+ 'initrequest',
+ 'inits',
+ 'inneroncompare',
+ 'input',
+ 'inputcolumns',
+ 'inputtype',
+ 'insert',
+ 'insertback',
+ 'insertbefore',
+ 'insertdata',
+ 'insertfirst',
+ 'insertfrom',
+ 'insertfront',
+ 'insertinternal',
+ 'insertlast',
+ 'insertpage',
+ 'install',
+ 'installs',
+ 'integer',
+ 'internalsubset',
+ 'interrupt',
+ 'intersection',
+ 'inttocond',
+ 'invoke',
+ 'invokeautocollect',
+ 'invokeuntil',
+ 'invokewhile',
+ 'ioctl',
+ 'isa',
+ 'isalive',
+ 'isallof',
+ 'isalnum',
+ 'isalpha',
+ 'isanyof',
+ 'isbase',
+ 'isblank',
+ 'iscntrl',
+ 'isdigit',
+ 'isdir',
+ 'isdirectory',
+ 'isempty',
+ 'isemptyelement',
+ 'isfirststep',
+ 'isfullpath',
+ 'isgraph',
+ 'ishttps',
+ 'isidle',
+ 'isinstanceof',
+ 'islink',
+ 'islower',
+ 'ismultipart',
+ 'isnan',
+ 'isnota',
+ 'isnotempty',
+ 'isnothing',
+ 'iso3country',
+ 'iso3language',
+ 'isopen',
+ 'isprint',
+ 'ispunct',
+ 'issameobject',
+ 'isset',
+ 'issourcefile',
+ 'isspace',
+ 'isssl',
+ 'issupported',
+ 'istitle',
+ 'istruetype',
+ 'istype',
+ 'isualphabetic',
+ 'isulowercase',
+ 'isupper',
+ 'isuuppercase',
+ 'isuwhitespace',
+ 'isvalid',
+ 'iswhitespace',
+ 'isxdigit',
+ 'isxhr',
+ 'item',
+ 'j0',
+ 'j1',
+ 'javascript',
+ 'jbarcode',
+ 'jcolor',
+ 'jfont',
+ 'jimage',
+ 'jlist',
+ 'jn',
+ 'jobjectisa',
+ 'join',
+ 'jread',
+ 'jscontent',
+ 'jsonfornode',
+ 'jsonhtml',
+ 'jsonisleaf',
+ 'jsonlabel',
+ 'jtable',
+ 'jtext',
+ 'julianday',
+ 'kernel',
+ 'key',
+ 'keycolumns',
+ 'keys',
+ 'keywords',
+ 'kill',
+ 'label',
+ 'lang',
+ 'language',
+ 'last_insert_rowid',
+ 'last',
+ 'lastaccessdate',
+ 'lastaccesstime',
+ 'lastchild',
+ 'lastcomponent',
+ 'lasterror',
+ 'lastinsertid',
+ 'lastnode',
+ 'lastpoint',
+ 'lasttouched',
+ 'lazyvalue',
+ 'ldexp',
+ 'leaveopen',
+ 'left',
+ 'length',
+ 'lgamma',
+ 'line',
+ 'linediffers',
+ 'linkto',
+ 'linktype',
+ 'list',
+ 'listactivedatasources',
+ 'listalldatabases',
+ 'listalltables',
+ 'listdatabasetables',
+ 'listdatasourcedatabases',
+ 'listdatasourcehosts',
+ 'listdatasources',
+ 'listen',
+ 'listgroups',
+ 'listgroupsbyuser',
+ 'listhostdatabases',
+ 'listhosts',
+ 'listmethods',
+ 'listnode',
+ 'listusers',
+ 'listusersbygroup',
+ 'loadcerts',
+ 'loaddatasourcehostinfo',
+ 'loaddatasourceinfo',
+ 'loadlibrary',
+ 'localaddress',
+ 'localname',
+ 'locals',
+ 'lock',
+ 'log',
+ 'log10',
+ 'log1p',
+ 'logb',
+ 'lookupnamespace',
+ 'lop',
+ 'lowagiefont',
+ 'lowercase',
+ 'makecolor',
+ 'makecolumnlist',
+ 'makecolumnmap',
+ 'makecookieyumyum',
+ 'makefullpath',
+ 'makeinheritedcopy',
+ 'makenonrelative',
+ 'makeurl',
+ 'map',
+ 'marker',
+ 'matches',
+ 'matchesstart',
+ 'matchposition',
+ 'matchstring',
+ 'matchtriggers',
+ 'max',
+ 'maxinmemory',
+ 'maxlength',
+ 'maxrows',
+ 'maxworkers',
+ 'maybeslash',
+ 'maybevalue',
+ 'md5hex',
+ 'media',
+ 'members',
+ 'merge',
+ 'meta',
+ 'method',
+ 'methodname',
+ 'millisecond',
+ 'millisecondsinday',
+ 'mime_boundary',
+ 'mime_contenttype',
+ 'mime_hdrs',
+ 'mime',
+ 'mimes',
+ 'min',
+ 'minute',
+ 'minutesbetween',
+ 'moddatestr',
+ 'mode',
+ 'modf',
+ 'modificationdate',
+ 'modificationtime',
+ 'modulate',
+ 'monitorenter',
+ 'monitorexit',
+ 'month',
+ 'moveto',
+ 'movetoattribute',
+ 'movetoattributenamespace',
+ 'movetoelement',
+ 'movetofirstattribute',
+ 'movetonextattribute',
+ 'msg',
+ 'mtime',
+ 'multiple',
+ 'n',
+ 'name',
+ 'named',
+ 'namespaceuri',
+ 'needinitialization',
+ 'net',
+ 'nettohost16',
+ 'nettohost32',
+ 'nettohost64',
+ 'new',
+ 'newbooleanarray',
+ 'newbytearray',
+ 'newchararray',
+ 'newdoublearray',
+ 'newfloatarray',
+ 'newglobalref',
+ 'newintarray',
+ 'newlongarray',
+ 'newobject',
+ 'newobjectarray',
+ 'newshortarray',
+ 'newstring',
+ 'next',
+ 'nextafter',
+ 'nextnode',
+ 'nextprime',
+ 'nextprune',
+ 'nextprunedelta',
+ 'nextsibling',
+ 'nodeforpath',
+ 'nodelist',
+ 'nodename',
+ 'nodetype',
+ 'nodevalue',
+ 'noop',
+ 'normalize',
+ 'notationname',
+ 'notations',
+ 'novaluelists',
+ 'numsets',
+ 'object',
+ 'objects',
+ 'objecttype',
+ 'onclick',
+ 'oncompare',
+ 'oncomparestrict',
+ 'onconvert',
+ 'oncreate',
+ 'ondblclick',
+ 'onkeydown',
+ 'onkeypress',
+ 'onkeyup',
+ 'onmousedown',
+ 'onmousemove',
+ 'onmouseout',
+ 'onmouseover',
+ 'onmouseup',
+ 'onreset',
+ 'onsubmit',
+ 'ontop',
+ 'open',
+ 'openappend',
+ 'openread',
+ 'opentruncate',
+ 'openwith',
+ 'openwrite',
+ 'openwriteonly',
+ 'orderby',
+ 'orderbydescending',
+ 'out',
+ 'output',
+ 'outputencoding',
+ 'ownerdocument',
+ 'ownerelement',
+ 'padleading',
+ 'padtrailing',
+ 'padzero',
+ 'pagecount',
+ 'pagerotation',
+ 'pagesize',
+ 'param',
+ 'paramdescs',
+ 'params',
+ 'parent',
+ 'parentdir',
+ 'parentnode',
+ 'parse_body',
+ 'parse_boundary',
+ 'parse_charset',
+ 'parse_content_disposition',
+ 'parse_content_transfer_encoding',
+ 'parse_content_type',
+ 'parse_hdrs',
+ 'parse_mode',
+ 'parse_msg',
+ 'parse_parts',
+ 'parse_rawhdrs',
+ 'parse',
+ 'parseas',
+ 'parsedocument',
+ 'parsenumber',
+ 'parseoneheaderline',
+ 'pass',
+ 'path',
+ 'pathinfo',
+ 'pathtouri',
+ 'pathtranslated',
+ 'pause',
+ 'payload',
+ 'pdifference',
+ 'perform',
+ 'performonce',
+ 'perms',
+ 'pid',
+ 'pixel',
+ 'pm',
+ 'polldbg',
+ 'pollide',
+ 'pop_capa',
+ 'pop_cmd',
+ 'pop_debug',
+ 'pop_err',
+ 'pop_get',
+ 'pop_ids',
+ 'pop_index',
+ 'pop_log',
+ 'pop_mode',
+ 'pop_net',
+ 'pop_res',
+ 'pop_server',
+ 'pop_timeout',
+ 'pop_token',
+ 'pop',
+ 'popctx',
+ 'popinclude',
+ 'populate',
+ 'port',
+ 'position',
+ 'postdispatch',
+ 'postparam',
+ 'postparams',
+ 'postparamsary',
+ 'poststring',
+ 'pow',
+ 'predispatch',
+ 'prefix',
+ 'preflight',
+ 'prepare',
+ 'prepared',
+ 'pretty',
+ 'prev',
+ 'previoussibling',
+ 'printsimplemsg',
+ 'private_compare',
+ 'private_find',
+ 'private_findlast',
+ 'private_merge',
+ 'private_rebalanceforinsert',
+ 'private_rebalanceforremove',
+ 'private_replaceall',
+ 'private_replacefirst',
+ 'private_rotateleft',
+ 'private_rotateright',
+ 'private_setrange',
+ 'private_split',
+ 'probemimetype',
+ 'provides',
+ 'proxying',
+ 'prune',
+ 'publicid',
+ 'pullhttpheader',
+ 'pullmimepost',
+ 'pulloneheaderline',
+ 'pullpost',
+ 'pullrawpost',
+ 'pullrawpostchunks',
+ 'pullrequest',
+ 'pullrequestline',
+ 'push',
+ 'pushctx',
+ 'pushinclude',
+ 'qdarray',
+ 'qdcount',
+ 'queryparam',
+ 'queryparams',
+ 'queryparamsary',
+ 'querystring',
+ 'queue_maintenance',
+ 'queue_messages',
+ 'queue_status',
+ 'queue',
+ 'quit',
+ 'r',
+ 'raw',
+ 'rawcontent',
+ 'rawdiff',
+ 'rawheader',
+ 'rawheaders',
+ 'rawinvokable',
+ 'read',
+ 'readattributevalue',
+ 'readbytes',
+ 'readbytesfully',
+ 'readdestinations',
+ 'readerror',
+ 'readidobjects',
+ 'readline',
+ 'readmessage',
+ 'readnumber',
+ 'readobject',
+ 'readobjecttcp',
+ 'readpacket',
+ 'readsomebytes',
+ 'readstring',
+ 'ready',
+ 'realdoc',
+ 'realpath',
+ 'receivefd',
+ 'recipients',
+ 'recover',
+ 'rect',
+ 'rectype',
+ 'red',
+ 'redirectto',
+ 'referrals',
+ 'refid',
+ 'refobj',
+ 'refresh',
+ 'rel',
+ 'remainder',
+ 'remoteaddr',
+ 'remoteaddress',
+ 'remoteport',
+ 'remove',
+ 'removeall',
+ 'removeattribute',
+ 'removeattributenode',
+ 'removeattributens',
+ 'removeback',
+ 'removechild',
+ 'removedatabasetable',
+ 'removedatasource',
+ 'removedatasourcedatabase',
+ 'removedatasourcehost',
+ 'removefield',
+ 'removefirst',
+ 'removefront',
+ 'removegroup',
+ 'removelast',
+ 'removeleading',
+ 'removenameditem',
+ 'removenameditemns',
+ 'removenode',
+ 'removesubnode',
+ 'removetrailing',
+ 'removeuser',
+ 'removeuserfromallgroups',
+ 'removeuserfromgroup',
+ 'rename',
+ 'renderbytes',
+ 'renderdocumentbytes',
+ 'renderstring',
+ 'replace',
+ 'replaceall',
+ 'replacechild',
+ 'replacedata',
+ 'replacefirst',
+ 'replaceheader',
+ 'replacepattern',
+ 'representnode',
+ 'representnoderesult',
+ 'reqid',
+ 'requestid',
+ 'requestmethod',
+ 'requestparams',
+ 'requesturi',
+ 'requires',
+ 'reserve',
+ 'reset',
+ 'resize',
+ 'resolutionh',
+ 'resolutionv',
+ 'resolvelinks',
+ 'resourcedata',
+ 'resourceinvokable',
+ 'resourcename',
+ 'resources',
+ 'respond',
+ 'restart',
+ 'restname',
+ 'result',
+ 'results',
+ 'resume',
+ 'retr',
+ 'retrieve',
+ 'returncolumns',
+ 'returntype',
+ 'rev',
+ 'reverse',
+ 'rewind',
+ 'right',
+ 'rint',
+ 'roll',
+ 'root',
+ 'rootmap',
+ 'rotate',
+ 'route',
+ 'rowsfound',
+ 'rset',
+ 'rule',
+ 'rules',
+ 'run',
+ 'running',
+ 'runonce',
+ 's',
+ 'sa',
+ 'safeexport8bits',
+ 'sameas',
+ 'save',
+ 'savedata',
+ 'scalb',
+ 'scale',
+ 'scanfordatasource',
+ 'scantasks',
+ 'scanworkers',
+ 'schemaname',
+ 'scheme',
+ 'script',
+ 'scriptextensions',
+ 'scriptfilename',
+ 'scriptname',
+ 'scripttype',
+ 'scripturi',
+ 'scripturl',
+ 'scrubkeywords',
+ 'search',
+ 'searchinbucket',
+ 'searchurl',
+ 'second',
+ 'secondsbetween',
+ 'seek',
+ 'select',
+ 'selected',
+ 'selectmany',
+ 'self',
+ 'send',
+ 'sendchunk',
+ 'sendfd',
+ 'sendfile',
+ 'sendpacket',
+ 'sendresponse',
+ 'separator',
+ 'serializationelements',
+ 'serialize',
+ 'serveraddr',
+ 'serveradmin',
+ 'servername',
+ 'serverport',
+ 'serverprotocol',
+ 'serversignature',
+ 'serversoftware',
+ 'sessionsdump',
+ 'sessionsmap',
+ 'set',
+ 'setalignment',
+ 'setattr',
+ 'setattribute',
+ 'setattributenode',
+ 'setattributenodens',
+ 'setattributens',
+ 'setbarheight',
+ 'setbarmultiplier',
+ 'setbarwidth',
+ 'setbaseline',
+ 'setbold',
+ 'setbooleanarrayregion',
+ 'setbooleanfield',
+ 'setbordercolor',
+ 'setborderwidth',
+ 'setbytearrayregion',
+ 'setbytefield',
+ 'setchararrayregion',
+ 'setcharfield',
+ 'setcode',
+ 'setcolor',
+ 'setcolorspace',
+ 'setcookie',
+ 'setcwd',
+ 'setdefaultstorage',
+ 'setdestination',
+ 'setdoublearrayregion',
+ 'setdoublefield',
+ 'setencoding',
+ 'setface',
+ 'setfieldvalue',
+ 'setfindpattern',
+ 'setfloatarrayregion',
+ 'setfloatfield',
+ 'setfont',
+ 'setformat',
+ 'setgeneratechecksum',
+ 'setheaders',
+ 'sethtmlattr',
+ 'setignorecase',
+ 'setinput',
+ 'setintarrayregion',
+ 'setintfield',
+ 'setitalic',
+ 'setlinewidth',
+ 'setlongarrayregion',
+ 'setlongfield',
+ 'setmarker',
+ 'setmaxfilesize',
+ 'setmode',
+ 'setname',
+ 'setnameditem',
+ 'setnameditemns',
+ 'setobjectarrayelement',
+ 'setobjectfield',
+ 'setpadding',
+ 'setpagenumber',
+ 'setpagerange',
+ 'setposition',
+ 'setrange',
+ 'setreplacepattern',
+ 'setshortarrayregion',
+ 'setshortfield',
+ 'setshowchecksum',
+ 'setsize',
+ 'setspacing',
+ 'setstaticbooleanfield',
+ 'setstaticbytefield',
+ 'setstaticcharfield',
+ 'setstaticdoublefield',
+ 'setstaticfloatfield',
+ 'setstaticintfield',
+ 'setstaticlongfield',
+ 'setstaticobjectfield',
+ 'setstaticshortfield',
+ 'setstatus',
+ 'settextalignment',
+ 'settextsize',
+ 'settimezone',
+ 'settrait',
+ 'setunderline',
+ 'sharpen',
+ 'shouldabort',
+ 'shouldclose',
+ 'showchecksum',
+ 'showcode39startstop',
+ 'showeanguardbars',
+ 'shutdownrd',
+ 'shutdownrdwr',
+ 'shutdownwr',
+ 'sin',
+ 'sinh',
+ 'size',
+ 'skip',
+ 'skiprows',
+ 'sort',
+ 'sortcolumns',
+ 'source',
+ 'sourcecolumn',
+ 'sourcefile',
+ 'sourceline',
+ 'specified',
+ 'split',
+ 'splitconnection',
+ 'splitdebuggingthread',
+ 'splitextension',
+ 'splittext',
+ 'splitthread',
+ 'splittoprivatedev',
+ 'splituppath',
+ 'sql',
+ 'sqlite3',
+ 'sqrt',
+ 'src',
+ 'srcpath',
+ 'sslerrfail',
+ 'stack',
+ 'standby',
+ 'start',
+ 'startone',
+ 'startup',
+ 'stat',
+ 'statement',
+ 'statementonly',
+ 'stats',
+ 'status',
+ 'statuscode',
+ 'statusmsg',
+ 'stdin',
+ 'step',
+ 'stls',
+ 'stop',
+ 'stoprunning',
+ 'storedata',
+ 'stripfirstcomponent',
+ 'striplastcomponent',
+ 'style',
+ 'styletype',
+ 'sub',
+ 'subject',
+ 'subnode',
+ 'subnodes',
+ 'substringdata',
+ 'subtract',
+ 'subtraits',
+ 'sum',
+ 'supportscontentrepresentation',
+ 'swapbytes',
+ 'systemid',
+ 't',
+ 'tabindex',
+ 'table',
+ 'tablecolumnnames',
+ 'tablecolumns',
+ 'tablehascolumn',
+ 'tableizestacktrace',
+ 'tableizestacktracelink',
+ 'tablemap',
+ 'tablename',
+ 'tables',
+ 'tabs',
+ 'tabstr',
+ 'tag',
+ 'tagname',
+ 'take',
+ 'tan',
+ 'tanh',
+ 'target',
+ 'tasks',
+ 'tb',
+ 'tell',
+ 'testexitcode',
+ 'testlock',
+ 'textwidth',
+ 'thenby',
+ 'thenbydescending',
+ 'threadreaddesc',
+ 'throw',
+ 'thrownew',
+ 'time',
+ 'timezone',
+ 'title',
+ 'titlecase',
+ 'to',
+ 'token',
+ 'tolower',
+ 'top',
+ 'toreflectedfield',
+ 'toreflectedmethod',
+ 'total_changes',
+ 'totitle',
+ 'touch',
+ 'toupper',
+ 'toxmlstring',
+ 'trace',
+ 'trackingid',
+ 'trait',
+ 'transform',
+ 'trigger',
+ 'trim',
+ 'trunk',
+ 'tryfinderrorfile',
+ 'trylock',
+ 'tryreadobject',
+ 'type',
+ 'typename',
+ 'uidl',
+ 'uncompress',
+ 'unescape',
+ 'union',
+ 'uniqueid',
+ 'unlock',
+ 'unspool',
+ 'up',
+ 'update',
+ 'updategroup',
+ 'upload',
+ 'uppercase',
+ 'url',
+ 'used',
+ 'usemap',
+ 'user',
+ 'usercolumns',
+ 'valid',
+ 'validate',
+ 'validatesessionstable',
+ 'value',
+ 'values',
+ 'valuetype',
+ 'variant',
+ 'version',
+ 'wait',
+ 'waitforcompletion',
+ 'warnings',
+ 'week',
+ 'weekofmonth',
+ 'weekofyear',
+ 'where',
+ 'width',
+ 'workers',
+ 'workinginputcolumns',
+ 'workingkeycolumns',
+ 'workingkeyfield_name',
+ 'workingreturncolumns',
+ 'workingsortcolumns',
+ 'write',
+ 'writebodybytes',
+ 'writebytes',
+ 'writeheader',
+ 'writeheaderbytes',
+ 'writeheaderline',
+ 'writeid',
+ 'writemessage',
+ 'writeobject',
+ 'writeobjecttcp',
+ 'writestring',
+ 'wroteheaders',
+ 'xhtml',
+ 'xmllang',
+ 'y0',
+ 'y1',
+ 'year',
+ 'yearwoy',
+ 'yn',
+ 'z',
+ 'zip',
+ 'zipfile',
+ 'zipfilename',
+ 'zipname',
+ 'zips',
+ 'zoneoffset',
+ ),
+ 'Lasso 8 Member Tags': (
+ 'accept',
+ 'add',
+ 'addattachment',
+ 'addattribute',
+ 'addbarcode',
+ 'addchapter',
+ 'addcheckbox',
+ 'addchild',
+ 'addcombobox',
+ 'addcomment',
+ 'addcontent',
+ 'addhiddenfield',
+ 'addhtmlpart',
+ 'addimage',
+ 'addjavascript',
+ 'addlist',
+ 'addnamespace',
+ 'addnextsibling',
+ 'addpage',
+ 'addparagraph',
+ 'addparenttype',
+ 'addpart',
+ 'addpasswordfield',
+ 'addphrase',
+ 'addprevsibling',
+ 'addradiobutton',
+ 'addradiogroup',
+ 'addresetbutton',
+ 'addsection',
+ 'addselectlist',
+ 'addsibling',
+ 'addsubmitbutton',
+ 'addtable',
+ 'addtext',
+ 'addtextarea',
+ 'addtextfield',
+ 'addtextpart',
+ 'alarms',
+ 'annotate',
+ 'answer',
+ 'append',
+ 'appendreplacement',
+ 'appendtail',
+ 'arc',
+ 'asasync',
+ 'astype',
+ 'atbegin',
+ 'atbottom',
+ 'atend',
+ 'atfarleft',
+ 'atfarright',
+ 'attop',
+ 'attributecount',
+ 'attributes',
+ 'authenticate',
+ 'authorize',
+ 'backward',
+ 'baseuri',
+ 'bcc',
+ 'beanproperties',
+ 'beginswith',
+ 'bind',
+ 'bitand',
+ 'bitclear',
+ 'bitflip',
+ 'bitformat',
+ 'bitnot',
+ 'bitor',
+ 'bitset',
+ 'bitshiftleft',
+ 'bitshiftright',
+ 'bittest',
+ 'bitxor',
+ 'blur',
+ 'body',
+ 'boundary',
+ 'bytes',
+ 'call',
+ 'cancel',
+ 'capabilities',
+ 'cc',
+ 'chardigitvalue',
+ 'charname',
+ 'charset',
+ 'chartype',
+ 'children',
+ 'circle',
+ 'close',
+ 'closepath',
+ 'closewrite',
+ 'code',
+ 'colorspace',
+ 'command',
+ 'comments',
+ 'compare',
+ 'comparecodepointorder',
+ 'compile',
+ 'composite',
+ 'connect',
+ 'contains',
+ 'content_disposition',
+ 'content_transfer_encoding',
+ 'content_type',
+ 'contents',
+ 'contrast',
+ 'convert',
+ 'crop',
+ 'curveto',
+ 'data',
+ 'date',
+ 'day',
+ 'daylights',
+ 'dayofweek',
+ 'dayofyear',
+ 'decrement',
+ 'delete',
+ 'depth',
+ 'describe',
+ 'description',
+ 'deserialize',
+ 'detach',
+ 'detachreference',
+ 'difference',
+ 'digit',
+ 'document',
+ 'down',
+ 'drawtext',
+ 'dst',
+ 'dump',
+ 'endswith',
+ 'enhance',
+ 'equals',
+ 'errors',
+ 'eval',
+ 'events',
+ 'execute',
+ 'export16bits',
+ 'export32bits',
+ 'export64bits',
+ 'export8bits',
+ 'exportfdf',
+ 'exportstring',
+ 'extract',
+ 'extractone',
+ 'fieldnames',
+ 'fieldtype',
+ 'fieldvalue',
+ 'file',
+ 'find',
+ 'findindex',
+ 'findnamespace',
+ 'findnamespacebyhref',
+ 'findpattern',
+ 'findposition',
+ 'first',
+ 'firstchild',
+ 'fliph',
+ 'flipv',
+ 'flush',
+ 'foldcase',
+ 'foreach',
+ 'format',
+ 'forward',
+ 'freebusies',
+ 'freezetype',
+ 'freezevalue',
+ 'from',
+ 'fulltype',
+ 'generatechecksum',
+ 'get',
+ 'getabswidth',
+ 'getalignment',
+ 'getattribute',
+ 'getattributenamespace',
+ 'getbarheight',
+ 'getbarmultiplier',
+ 'getbarwidth',
+ 'getbaseline',
+ 'getbordercolor',
+ 'getborderwidth',
+ 'getcode',
+ 'getcolor',
+ 'getcolumncount',
+ 'getencoding',
+ 'getface',
+ 'getfont',
+ 'getformat',
+ 'getfullfontname',
+ 'getheaders',
+ 'getmargins',
+ 'getmethod',
+ 'getnumericvalue',
+ 'getpadding',
+ 'getpagenumber',
+ 'getparams',
+ 'getproperty',
+ 'getpsfontname',
+ 'getrange',
+ 'getrowcount',
+ 'getsize',
+ 'getspacing',
+ 'getsupportedencodings',
+ 'gettextalignment',
+ 'gettextsize',
+ 'gettype',
+ 'gmt',
+ 'groupcount',
+ 'hasattribute',
+ 'haschildren',
+ 'hasvalue',
+ 'header',
+ 'headers',
+ 'height',
+ 'histogram',
+ 'hosttonet16',
+ 'hosttonet32',
+ 'hour',
+ 'id',
+ 'ignorecase',
+ 'import16bits',
+ 'import32bits',
+ 'import64bits',
+ 'import8bits',
+ 'importfdf',
+ 'importstring',
+ 'increment',
+ 'input',
+ 'insert',
+ 'insertatcurrent',
+ 'insertfirst',
+ 'insertfrom',
+ 'insertlast',
+ 'insertpage',
+ 'integer',
+ 'intersection',
+ 'invoke',
+ 'isa',
+ 'isalnum',
+ 'isalpha',
+ 'isbase',
+ 'iscntrl',
+ 'isdigit',
+ 'isemptyelement',
+ 'islower',
+ 'isopen',
+ 'isprint',
+ 'isspace',
+ 'istitle',
+ 'istruetype',
+ 'isualphabetic',
+ 'isulowercase',
+ 'isupper',
+ 'isuuppercase',
+ 'isuwhitespace',
+ 'iswhitespace',
+ 'iterator',
+ 'javascript',
+ 'join',
+ 'journals',
+ 'key',
+ 'keys',
+ 'last',
+ 'lastchild',
+ 'lasterror',
+ 'left',
+ 'length',
+ 'line',
+ 'listen',
+ 'localaddress',
+ 'localname',
+ 'lock',
+ 'lookupnamespace',
+ 'lowercase',
+ 'marker',
+ 'matches',
+ 'matchesstart',
+ 'matchposition',
+ 'matchstring',
+ 'merge',
+ 'millisecond',
+ 'minute',
+ 'mode',
+ 'modulate',
+ 'month',
+ 'moveto',
+ 'movetoattributenamespace',
+ 'movetoelement',
+ 'movetofirstattribute',
+ 'movetonextattribute',
+ 'name',
+ 'namespaces',
+ 'namespaceuri',
+ 'nettohost16',
+ 'nettohost32',
+ 'newchild',
+ 'next',
+ 'nextsibling',
+ 'nodetype',
+ 'open',
+ 'output',
+ 'padleading',
+ 'padtrailing',
+ 'pagecount',
+ 'pagesize',
+ 'paraminfo',
+ 'params',
+ 'parent',
+ 'path',
+ 'pixel',
+ 'position',
+ 'prefix',
+ 'previoussibling',
+ 'properties',
+ 'rawheaders',
+ 'read',
+ 'readattributevalue',
+ 'readerror',
+ 'readfrom',
+ 'readline',
+ 'readlock',
+ 'readstring',
+ 'readunlock',
+ 'recipients',
+ 'rect',
+ 'refcount',
+ 'referrals',
+ 'remoteaddress',
+ 'remove',
+ 'removeall',
+ 'removeattribute',
+ 'removechild',
+ 'removecurrent',
+ 'removefirst',
+ 'removelast',
+ 'removeleading',
+ 'removenamespace',
+ 'removetrailing',
+ 'render',
+ 'replace',
+ 'replaceall',
+ 'replacefirst',
+ 'replacepattern',
+ 'replacewith',
+ 'reserve',
+ 'reset',
+ 'resolutionh',
+ 'resolutionv',
+ 'response',
+ 'results',
+ 'retrieve',
+ 'returntype',
+ 'reverse',
+ 'reverseiterator',
+ 'right',
+ 'rotate',
+ 'run',
+ 'save',
+ 'scale',
+ 'search',
+ 'second',
+ 'send',
+ 'serialize',
+ 'set',
+ 'setalignment',
+ 'setbarheight',
+ 'setbarmultiplier',
+ 'setbarwidth',
+ 'setbaseline',
+ 'setblocking',
+ 'setbordercolor',
+ 'setborderwidth',
+ 'setbytes',
+ 'setcode',
+ 'setcolor',
+ 'setcolorspace',
+ 'setdatatype',
+ 'setencoding',
+ 'setface',
+ 'setfieldvalue',
+ 'setfont',
+ 'setformat',
+ 'setgeneratechecksum',
+ 'setheight',
+ 'setlassodata',
+ 'setlinewidth',
+ 'setmarker',
+ 'setmode',
+ 'setname',
+ 'setpadding',
+ 'setpagenumber',
+ 'setpagerange',
+ 'setposition',
+ 'setproperty',
+ 'setrange',
+ 'setshowchecksum',
+ 'setsize',
+ 'setspacing',
+ 'settemplate',
+ 'settemplatestr',
+ 'settextalignment',
+ 'settextdata',
+ 'settextsize',
+ 'settype',
+ 'setunderline',
+ 'setwidth',
+ 'setxmldata',
+ 'sharpen',
+ 'showchecksum',
+ 'showcode39startstop',
+ 'showeanguardbars',
+ 'signal',
+ 'signalall',
+ 'size',
+ 'smooth',
+ 'sort',
+ 'sortwith',
+ 'split',
+ 'standards',
+ 'steal',
+ 'subject',
+ 'substring',
+ 'subtract',
+ 'swapbytes',
+ 'textwidth',
+ 'time',
+ 'timezones',
+ 'titlecase',
+ 'to',
+ 'todos',
+ 'tolower',
+ 'totitle',
+ 'toupper',
+ 'transform',
+ 'trim',
+ 'type',
+ 'unescape',
+ 'union',
+ 'uniqueid',
+ 'unlock',
+ 'unserialize',
+ 'up',
+ 'uppercase',
+ 'value',
+ 'values',
+ 'valuetype',
+ 'wait',
+ 'waskeyword',
+ 'week',
+ 'width',
+ 'write',
+ 'writelock',
+ 'writeto',
+ 'writeunlock',
+ 'xmllang',
+ 'xmlschematype',
+ 'year',
+ )
+}
diff --git a/pygments/lexers/_lilypond_builtins.py b/pygments/lexers/_lilypond_builtins.py
new file mode 100644
index 0000000..89cc626
--- /dev/null
+++ b/pygments/lexers/_lilypond_builtins.py
@@ -0,0 +1,4886 @@
+"""
+ pygments.lexers._lilypond_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ LilyPond builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Contents generated by the script lilypond-builtins-generator.ly
+# found in the external/ directory of the source tree.
+
+keywords = [
+ "accepts",
+ "addlyrics",
+ "alias",
+ "book",
+ "bookpart",
+ "chordmode",
+ "chords",
+ "consists",
+ "context",
+ "defaultchild",
+ "denies",
+ "description",
+ "drummode",
+ "drums",
+ "etc",
+ "figuremode",
+ "figures",
+ "header",
+ "include",
+ "inherit-acceptability",
+ "language",
+ "layout",
+ "lyricmode",
+ "lyricsto",
+ "midi",
+ "name",
+ "new",
+ "notemode",
+ "paper",
+ "remove",
+ "score",
+ "type",
+ "version",
+ "with",
+]
+
+clefs = [
+ "C",
+ "F",
+ "G",
+ "G2",
+ "GG",
+ "alto",
+ "altovarC",
+ "baritone",
+ "baritonevarC",
+ "baritonevarF",
+ "bass",
+ "blackmensural-c1",
+ "blackmensural-c2",
+ "blackmensural-c3",
+ "blackmensural-c4",
+ "blackmensural-c5",
+ "french",
+ "hufnagel-do-fa",
+ "hufnagel-do1",
+ "hufnagel-do2",
+ "hufnagel-do3",
+ "hufnagel-fa1",
+ "hufnagel-fa2",
+ "kievan-do",
+ "medicaea-do1",
+ "medicaea-do2",
+ "medicaea-do3",
+ "medicaea-fa1",
+ "medicaea-fa2",
+ "mensural-c1",
+ "mensural-c2",
+ "mensural-c3",
+ "mensural-c4",
+ "mensural-c5",
+ "mensural-f",
+ "mensural-g",
+ "mezzosoprano",
+ "moderntab",
+ "neomensural-c1",
+ "neomensural-c2",
+ "neomensural-c3",
+ "neomensural-c4",
+ "neomensural-c5",
+ "percussion",
+ "petrucci-c1",
+ "petrucci-c2",
+ "petrucci-c3",
+ "petrucci-c4",
+ "petrucci-c5",
+ "petrucci-f",
+ "petrucci-f2",
+ "petrucci-f3",
+ "petrucci-f4",
+ "petrucci-f5",
+ "petrucci-g",
+ "petrucci-g1",
+ "petrucci-g2",
+ "soprano",
+ "subbass",
+ "tab",
+ "tenor",
+ "tenorG",
+ "tenorvarC",
+ "treble",
+ "varC",
+ "varbaritone",
+ "varpercussion",
+ "vaticana-do1",
+ "vaticana-do2",
+ "vaticana-do3",
+ "vaticana-fa1",
+ "vaticana-fa2",
+ "violin",
+]
+
+scales = [
+ "aeolian",
+ "dorian",
+ "ionian",
+ "locrian",
+ "lydian",
+ "major",
+ "minor",
+ "mixolydian",
+ "phrygian",
+]
+
+repeat_types = [
+ "percent",
+ "segno",
+ "unfold",
+ "volta",
+]
+
+units = [
+ "cm",
+ "in",
+ "mm",
+ "pt",
+ "staff-space",
+]
+
+chord_modifiers = [
+ "aug",
+ "dim",
+ "m",
+ "maj",
+]
+
+pitch_language_names = [
+ "arabic",
+ "catalan",
+ "català",
+ "deutsch",
+ "english",
+ "espanol",
+ "español",
+ "français",
+ "italiano",
+ "nederlands",
+ "norsk",
+ "portugues",
+ "português",
+ "suomi",
+ "svenska",
+ "vlaams",
+]
+
+pitches = [
+ "R",
+ "a",
+ "a-flat",
+ "a-flatflat",
+ "a-natural",
+ "a-sharp",
+ "a-sharpsharp",
+ "ab",
+ "acousticbassdrum",
+ "acousticsnare",
+ "ad",
+ "adb",
+ "add",
+ "aeh",
+ "aes",
+ "aeseh",
+ "aeses",
+ "aess",
+ "aesseh",
+ "aessess",
+ "af",
+ "aff",
+ "afhb",
+ "afhd",
+ "agh",
+ "agl",
+ "ah",
+ "aih",
+ "ais",
+ "aisih",
+ "aisis",
+ "aiss",
+ "aissih",
+ "aississ",
+ "aqf",
+ "aqs",
+ "as",
+ "asah",
+ "asas",
+ "aseh",
+ "ases",
+ "ashb",
+ "ashd",
+ "ass",
+ "asseh",
+ "assess",
+ "atqb",
+ "atqd",
+ "atqf",
+ "atqs",
+ "ax",
+ "b",
+ "b-flat",
+ "b-flatflat",
+ "b-natural",
+ "b-sharp",
+ "b-sharpsharp",
+ "bassdrum",
+ "bb",
+ "bd",
+ "bda",
+ "bdb",
+ "bdd",
+ "beh",
+ "bes",
+ "beseh",
+ "beses",
+ "bess",
+ "bf",
+ "bff",
+ "bfhb",
+ "bfhd",
+ "bih",
+ "bis",
+ "bisih",
+ "bisis",
+ "boh",
+ "bohm",
+ "boho",
+ "bol",
+ "bolm",
+ "bolo",
+ "bqf",
+ "bqs",
+ "bs",
+ "bshb",
+ "bshd",
+ "bss",
+ "btqb",
+ "btqd",
+ "btqf",
+ "btqs",
+ "bx",
+ "c",
+ "c-flat",
+ "c-flatflat",
+ "c-natural",
+ "c-sharp",
+ "c-sharpsharp",
+ "cab",
+ "cabasa",
+ "cb",
+ "cd",
+ "cdb",
+ "cdd",
+ "ceh",
+ "ces",
+ "ceseh",
+ "ceses",
+ "cess",
+ "cesseh",
+ "cessess",
+ "cf",
+ "cff",
+ "cfhb",
+ "cfhd",
+ "cgh",
+ "cghm",
+ "cgho",
+ "cgl",
+ "cglm",
+ "cglo",
+ "chinesecymbal",
+ "cih",
+ "cis",
+ "cisih",
+ "cisis",
+ "ciss",
+ "cissih",
+ "cississ",
+ "cl",
+ "claves",
+ "closedhihat",
+ "cowbell",
+ "cqf",
+ "cqs",
+ "crashcymbal",
+ "crashcymbala",
+ "crashcymbalb",
+ "cs",
+ "cshb",
+ "cshd",
+ "css",
+ "ctqb",
+ "ctqd",
+ "ctqf",
+ "ctqs",
+ "cuim",
+ "cuio",
+ "cx",
+ "cymc",
+ "cymca",
+ "cymcb",
+ "cymch",
+ "cymr",
+ "cymra",
+ "cymrb",
+ "cyms",
+ "d",
+ "d-flat",
+ "d-flatflat",
+ "d-natural",
+ "d-sharp",
+ "d-sharpsharp",
+ "db",
+ "dd",
+ "ddb",
+ "ddd",
+ "deh",
+ "des",
+ "deseh",
+ "deses",
+ "dess",
+ "desseh",
+ "dessess",
+ "df",
+ "dff",
+ "dfhb",
+ "dfhd",
+ "dih",
+ "dis",
+ "disih",
+ "disis",
+ "diss",
+ "dissih",
+ "dississ",
+ "do",
+ "dob",
+ "dobb",
+ "dobhb",
+ "dobqt",
+ "dobsb",
+ "dobtqt",
+ "docb",
+ "docs",
+ "dod",
+ "dodd",
+ "dodsd",
+ "dohb",
+ "dohk",
+ "dok",
+ "dokhk",
+ "dokk",
+ "doqb",
+ "doqd",
+ "doqs",
+ "dos",
+ "dosb",
+ "dosd",
+ "dosqt",
+ "doss",
+ "dostqt",
+ "dotcb",
+ "dotcs",
+ "dotqb",
+ "dotqd",
+ "dotqs",
+ "dox",
+ "dqf",
+ "dqs",
+ "ds",
+ "dshb",
+ "dshd",
+ "dss",
+ "dtqb",
+ "dtqd",
+ "dtqf",
+ "dtqs",
+ "dx",
+ "e",
+ "e-flat",
+ "e-flatflat",
+ "e-natural",
+ "e-sharp",
+ "e-sharpsharp",
+ "eb",
+ "ed",
+ "edb",
+ "edd",
+ "eeh",
+ "ees",
+ "eeseh",
+ "eeses",
+ "eess",
+ "eesseh",
+ "eessess",
+ "ef",
+ "eff",
+ "efhb",
+ "efhd",
+ "eh",
+ "eih",
+ "eis",
+ "eisih",
+ "eisis",
+ "eiss",
+ "eissih",
+ "eississ",
+ "electricsnare",
+ "eqf",
+ "eqs",
+ "es",
+ "eseh",
+ "eses",
+ "eshb",
+ "eshd",
+ "ess",
+ "esseh",
+ "essess",
+ "etqb",
+ "etqd",
+ "etqf",
+ "etqs",
+ "ex",
+ "f",
+ "f-flat",
+ "f-flatflat",
+ "f-natural",
+ "f-sharp",
+ "f-sharpsharp",
+ "fa",
+ "fab",
+ "fabb",
+ "fabhb",
+ "fabqt",
+ "fabsb",
+ "fabtqt",
+ "facb",
+ "facs",
+ "fad",
+ "fadd",
+ "fadsd",
+ "fahb",
+ "fahk",
+ "fak",
+ "fakhk",
+ "fakk",
+ "faqb",
+ "faqd",
+ "faqs",
+ "fas",
+ "fasb",
+ "fasd",
+ "fasqt",
+ "fass",
+ "fastqt",
+ "fatcb",
+ "fatcs",
+ "fatqb",
+ "fatqd",
+ "fatqs",
+ "fax",
+ "fb",
+ "fd",
+ "fdb",
+ "fdd",
+ "feh",
+ "fes",
+ "feseh",
+ "feses",
+ "fess",
+ "fesseh",
+ "fessess",
+ "ff",
+ "fff",
+ "ffhb",
+ "ffhd",
+ "fih",
+ "fis",
+ "fisih",
+ "fisis",
+ "fiss",
+ "fissih",
+ "fississ",
+ "fqf",
+ "fqs",
+ "fs",
+ "fshb",
+ "fshd",
+ "fss",
+ "ftqb",
+ "ftqd",
+ "ftqf",
+ "ftqs",
+ "fx",
+ "g",
+ "g-flat",
+ "g-flatflat",
+ "g-natural",
+ "g-sharp",
+ "g-sharpsharp",
+ "gb",
+ "gd",
+ "gdb",
+ "gdd",
+ "geh",
+ "ges",
+ "geseh",
+ "geses",
+ "gess",
+ "gesseh",
+ "gessess",
+ "gf",
+ "gff",
+ "gfhb",
+ "gfhd",
+ "gih",
+ "gis",
+ "gisih",
+ "gisis",
+ "giss",
+ "gissih",
+ "gississ",
+ "gqf",
+ "gqs",
+ "gs",
+ "gshb",
+ "gshd",
+ "gss",
+ "gtqb",
+ "gtqd",
+ "gtqf",
+ "gtqs",
+ "gui",
+ "guil",
+ "guiro",
+ "guis",
+ "gx",
+ "h",
+ "halfopenhihat",
+ "handclap",
+ "hc",
+ "heh",
+ "heseh",
+ "heses",
+ "hesseh",
+ "hessess",
+ "hh",
+ "hhc",
+ "hhho",
+ "hho",
+ "hhp",
+ "hiagogo",
+ "hibongo",
+ "hiconga",
+ "highfloortom",
+ "hightom",
+ "hih",
+ "hihat",
+ "himidtom",
+ "his",
+ "hisidestick",
+ "hisih",
+ "hisis",
+ "hiss",
+ "hissih",
+ "hississ",
+ "hitimbale",
+ "hiwoodblock",
+ "la",
+ "lab",
+ "labb",
+ "labhb",
+ "labqt",
+ "labsb",
+ "labtqt",
+ "lacb",
+ "lacs",
+ "lad",
+ "ladd",
+ "ladsd",
+ "lahb",
+ "lahk",
+ "lak",
+ "lakhk",
+ "lakk",
+ "laqb",
+ "laqd",
+ "laqs",
+ "las",
+ "lasb",
+ "lasd",
+ "lasqt",
+ "lass",
+ "lastqt",
+ "latcb",
+ "latcs",
+ "latqb",
+ "latqd",
+ "latqs",
+ "lax",
+ "loagogo",
+ "lobongo",
+ "loconga",
+ "longguiro",
+ "longwhistle",
+ "losidestick",
+ "lotimbale",
+ "lowfloortom",
+ "lowmidtom",
+ "lowoodblock",
+ "lowtom",
+ "mar",
+ "maracas",
+ "mi",
+ "mib",
+ "mibb",
+ "mibhb",
+ "mibqt",
+ "mibsb",
+ "mibtqt",
+ "micb",
+ "mics",
+ "mid",
+ "midd",
+ "midsd",
+ "mihb",
+ "mihk",
+ "mik",
+ "mikhk",
+ "mikk",
+ "miqb",
+ "miqd",
+ "miqs",
+ "mis",
+ "misb",
+ "misd",
+ "misqt",
+ "miss",
+ "mistqt",
+ "mitcb",
+ "mitcs",
+ "mitqb",
+ "mitqd",
+ "mitqs",
+ "mix",
+ "mutecuica",
+ "mutehibongo",
+ "mutehiconga",
+ "mutelobongo",
+ "muteloconga",
+ "mutetriangle",
+ "opencuica",
+ "openhibongo",
+ "openhiconga",
+ "openhihat",
+ "openlobongo",
+ "openloconga",
+ "opentriangle",
+ "pedalhihat",
+ "r",
+ "rb",
+ "re",
+ "reb",
+ "rebb",
+ "rebhb",
+ "rebqt",
+ "rebsb",
+ "rebtqt",
+ "recb",
+ "recs",
+ "red",
+ "redd",
+ "redsd",
+ "rehb",
+ "rehk",
+ "rek",
+ "rekhk",
+ "rekk",
+ "reqb",
+ "reqd",
+ "reqs",
+ "res",
+ "resb",
+ "resd",
+ "resqt",
+ "ress",
+ "restqt",
+ "retcb",
+ "retcs",
+ "retqb",
+ "retqd",
+ "retqs",
+ "rex",
+ "ridebell",
+ "ridecymbal",
+ "ridecymbala",
+ "ridecymbalb",
+ "ré",
+ "réb",
+ "rébb",
+ "rébsb",
+ "réd",
+ "rédd",
+ "rédsd",
+ "résb",
+ "résd",
+ "réx",
+ "shortguiro",
+ "shortwhistle",
+ "si",
+ "sib",
+ "sibb",
+ "sibhb",
+ "sibqt",
+ "sibsb",
+ "sibtqt",
+ "sicb",
+ "sics",
+ "sid",
+ "sidd",
+ "sidestick",
+ "sidsd",
+ "sihb",
+ "sihk",
+ "sik",
+ "sikhk",
+ "sikk",
+ "siqb",
+ "siqd",
+ "siqs",
+ "sis",
+ "sisb",
+ "sisd",
+ "sisqt",
+ "siss",
+ "sistqt",
+ "sitcb",
+ "sitcs",
+ "sitqb",
+ "sitqd",
+ "sitqs",
+ "six",
+ "sn",
+ "sna",
+ "snare",
+ "sne",
+ "sol",
+ "solb",
+ "solbb",
+ "solbhb",
+ "solbqt",
+ "solbsb",
+ "solbtqt",
+ "solcb",
+ "solcs",
+ "sold",
+ "soldd",
+ "soldsd",
+ "solhb",
+ "solhk",
+ "solk",
+ "solkhk",
+ "solkk",
+ "solqb",
+ "solqd",
+ "solqs",
+ "sols",
+ "solsb",
+ "solsd",
+ "solsqt",
+ "solss",
+ "solstqt",
+ "soltcb",
+ "soltcs",
+ "soltqb",
+ "soltqd",
+ "soltqs",
+ "solx",
+ "splashcymbal",
+ "ss",
+ "ssh",
+ "ssl",
+ "tamb",
+ "tambourine",
+ "timh",
+ "timl",
+ "tomfh",
+ "tomfl",
+ "tomh",
+ "toml",
+ "tommh",
+ "tomml",
+ "tri",
+ "triangle",
+ "trim",
+ "trio",
+ "tt",
+ "vibraslap",
+ "vibs",
+ "wbh",
+ "wbl",
+ "whl",
+ "whs",
+]
+
+music_functions = [
+ "=",
+ "absolute",
+ "acciaccatura",
+ "accidentalStyle",
+ "addChordShape",
+ "addInstrumentDefinition",
+ "addQuote",
+ "after",
+ "afterGrace",
+ "allowPageTurn",
+ "allowVoltaHook",
+ "alterBroken",
+ "alternative",
+ "ambitusAfter",
+ "appendToTag",
+ "applyContext",
+ "applyMusic",
+ "applyOutput",
+ "appoggiatura",
+ "assertBeamQuant",
+ "assertBeamSlope",
+ "autoChange",
+ "balloonGrobText",
+ "balloonText",
+ "bar",
+ "barNumberCheck",
+ "beamExceptions",
+ "bendAfter",
+ "bendHold",
+ "bendStartLevel",
+ "bookOutputName",
+ "bookOutputSuffix",
+ "breathe",
+ "caesura",
+ "change",
+ "chordRepeats",
+ "clef",
+ "codaMark",
+ "compoundMeter",
+ "compressMMRests",
+ "crossStaff",
+ "cueClef",
+ "cueClefUnset",
+ "cueDuring",
+ "cueDuringWithClef",
+ "deadNote",
+ "defineBarLine",
+ "displayLilyMusic",
+ "displayMusic",
+ "displayScheme",
+ "dropNote",
+ "enablePolymeter",
+ "endSpanners",
+ "eventChords",
+ "featherDurations",
+ "finger",
+ "fixed",
+ "footnote",
+ "grace",
+ "grobdescriptions",
+ "harmonicByFret",
+ "harmonicByRatio",
+ "harmonicNote",
+ "harmonicsOn",
+ "hide",
+ "inStaffSegno",
+ "incipit",
+ "inherit-acceptability",
+ "instrumentSwitch",
+ "inversion",
+ "invertChords",
+ "jump",
+ "keepWithTag",
+ "key",
+ "killCues",
+ "label",
+ "language",
+ "languageRestore",
+ "languageSaveAndChange",
+ "magnifyMusic",
+ "magnifyStaff",
+ "makeClusters",
+ "makeDefaultStringTuning",
+ "mark",
+ "markupMap",
+ "modalInversion",
+ "modalTranspose",
+ "musicMap",
+ "noPageBreak",
+ "noPageTurn",
+ "octaveCheck",
+ "offset",
+ "omit",
+ "once",
+ "ottava",
+ "override",
+ "overrideProperty",
+ "overrideTimeSignatureSettings",
+ "pageBreak",
+ "pageTurn",
+ "palmMute",
+ "palmMuteOn",
+ "parallelMusic",
+ "parenthesize",
+ "partCombine",
+ "partCombineDown",
+ "partCombineForce",
+ "partCombineUp",
+ "partial",
+ "phrasingSlurDashPattern",
+ "pitchedTrill",
+ "pointAndClickOff",
+ "pointAndClickOn",
+ "pointAndClickTypes",
+ "preBend",
+ "preBendHold",
+ "propertyOverride",
+ "propertyRevert",
+ "propertySet",
+ "propertyTweak",
+ "propertyUnset",
+ "pushToTag",
+ "quoteDuring",
+ "raiseNote",
+ "reduceChords",
+ "relative",
+ "removeWithTag",
+ "repeat",
+ "resetRelativeOctave",
+ "retrograde",
+ "revert",
+ "revertTimeSignatureSettings",
+ "rightHandFinger",
+ "scaleDurations",
+ "sectionLabel",
+ "segnoMark",
+ "set",
+ "settingsFrom",
+ "shape",
+ "shiftDurations",
+ "single",
+ "skip",
+ "slashedGrace",
+ "slurDashPattern",
+ "storePredefinedDiagram",
+ "stringTuning",
+ "styledNoteHeads",
+ "tabChordRepeats",
+ "tabChordRepetition",
+ "tag",
+ "tagGroup",
+ "tempo",
+ "temporary",
+ "tieDashPattern",
+ "time",
+ "times",
+ "tocItem",
+ "transpose",
+ "transposedCueDuring",
+ "transposition",
+ "tuplet",
+ "tupletSpan",
+ "tweak",
+ "undo",
+ "unfoldRepeats",
+ "unfolded",
+ "unset",
+ "voices",
+ "void",
+ "volta",
+ "vshape",
+ "withMusicProperty",
+ "xNote",
+]
+
+dynamics = [
+ "!",
+ "<",
+ ">",
+ "cr",
+ "cresc",
+ "decr",
+ "decresc",
+ "dim",
+ "endcr",
+ "endcresc",
+ "enddecr",
+ "enddecresc",
+ "enddim",
+ "f",
+ "ff",
+ "fff",
+ "ffff",
+ "fffff",
+ "fp",
+ "fz",
+ "mf",
+ "mp",
+ "n",
+ "p",
+ "pp",
+ "ppp",
+ "pppp",
+ "ppppp",
+ "rfz",
+ "sf",
+ "sff",
+ "sfp",
+ "sfz",
+ "sp",
+ "spp",
+]
+
+articulations = [
+ "(",
+ ")",
+ "-",
+ "[",
+ "]",
+ "^",
+ "accent",
+ "arpeggio",
+ "breakDynamicSpan",
+ "coda",
+ "dashBang",
+ "dashDash",
+ "dashDot",
+ "dashHat",
+ "dashLarger",
+ "dashPlus",
+ "dashUnderscore",
+ "downbow",
+ "downmordent",
+ "downprall",
+ "episemFinis",
+ "episemInitium",
+ "espressivo",
+ "fermata",
+ "flageolet",
+ "glide",
+ "glissando",
+ "halfopen",
+ "harmonic",
+ "haydnturn",
+ "henzelongfermata",
+ "henzeshortfermata",
+ "laissezVibrer",
+ "lheel",
+ "lineprall",
+ "longfermata",
+ "ltoe",
+ "marcato",
+ "mordent",
+ "noBeam",
+ "open",
+ "portato",
+ "prall",
+ "pralldown",
+ "prallmordent",
+ "prallprall",
+ "prallup",
+ "repeatTie",
+ "reverseturn",
+ "rheel",
+ "rtoe",
+ "segno",
+ "shortfermata",
+ "signumcongruentiae",
+ "slashturn",
+ "snappizzicato",
+ "sostenutoOff",
+ "sostenutoOn",
+ "staccatissimo",
+ "staccato",
+ "startGraceSlur",
+ "startGroup",
+ "startTextSpan",
+ "startTrillSpan",
+ "stopGraceSlur",
+ "stopGroup",
+ "stopTextSpan",
+ "stopTrillSpan",
+ "stopped",
+ "sustainOff",
+ "sustainOn",
+ "tenuto",
+ "thumb",
+ "treCorde",
+ "trill",
+ "turn",
+ "unaCorda",
+ "upbow",
+ "upmordent",
+ "upprall",
+ "varcoda",
+ "verylongfermata",
+ "veryshortfermata",
+ "vowelTransition",
+ "~",
+]
+
+music_commands = [
+ "[",
+ "]",
+ "aikenHeads",
+ "aikenHeadsMinor",
+ "aikenThinHeads",
+ "aikenThinHeadsMinor",
+ "allowBreak",
+ "arabicStringNumbers",
+ "arpeggioArrowDown",
+ "arpeggioArrowUp",
+ "arpeggioBracket",
+ "arpeggioNormal",
+ "arpeggioParenthesis",
+ "arpeggioParenthesisDashed",
+ "autoBeamOff",
+ "autoBeamOn",
+ "autoBreaksOff",
+ "autoBreaksOn",
+ "autoLineBreaksOff",
+ "autoLineBreaksOn",
+ "autoPageBreaksOff",
+ "autoPageBreaksOn",
+ "balloonLengthOff",
+ "balloonLengthOn",
+ "bassFigureExtendersOff",
+ "bassFigureExtendersOn",
+ "bassFigureStaffAlignmentDown",
+ "bassFigureStaffAlignmentNeutral",
+ "bassFigureStaffAlignmentUp",
+ "break",
+ "cadenzaOff",
+ "cadenzaOn",
+ "compressEmptyMeasures",
+ "crescHairpin",
+ "crescTextCresc",
+ "deadNotesOff",
+ "deadNotesOn",
+ "defaultNoteHeads",
+ "defaultTimeSignature",
+ "deprecatedcresc",
+ "deprecateddim",
+ "deprecatedendcresc",
+ "deprecatedenddim",
+ "dimHairpin",
+ "dimTextDecr",
+ "dimTextDecresc",
+ "dimTextDim",
+ "dotsDown",
+ "dotsNeutral",
+ "dotsUp",
+ "dynamicDown",
+ "dynamicNeutral",
+ "dynamicUp",
+ "easyHeadsOff",
+ "easyHeadsOn",
+ "endSkipNCs",
+ "expandEmptyMeasures",
+ "fine",
+ "frenchChords",
+ "funkHeads",
+ "funkHeadsMinor",
+ "germanChords",
+ "harmonicsOff",
+ "hideNotes",
+ "hideSplitTiedTabNotes",
+ "hideStaffSwitch",
+ "huge",
+ "ignatzekExceptionMusic",
+ "improvisationOff",
+ "improvisationOn",
+ "italianChords",
+ "kievanOff",
+ "kievanOn",
+ "large",
+ "markLengthOff",
+ "markLengthOn",
+ "medianChordGridStyle",
+ "melisma",
+ "melismaEnd",
+ "mergeDifferentlyDottedOff",
+ "mergeDifferentlyDottedOn",
+ "mergeDifferentlyHeadedOff",
+ "mergeDifferentlyHeadedOn",
+ "newSpacingSection",
+ "noBreak",
+ "normalsize",
+ "numericTimeSignature",
+ "oneVoice",
+ "palmMuteOff",
+ "partCombineApart",
+ "partCombineAutomatic",
+ "partCombineChords",
+ "partCombineSoloI",
+ "partCombineSoloII",
+ "partCombineUnisono",
+ "phrasingSlurDashed",
+ "phrasingSlurDotted",
+ "phrasingSlurDown",
+ "phrasingSlurHalfDashed",
+ "phrasingSlurHalfSolid",
+ "phrasingSlurNeutral",
+ "phrasingSlurSolid",
+ "phrasingSlurUp",
+ "predefinedFretboardsOff",
+ "predefinedFretboardsOn",
+ "romanStringNumbers",
+ "sacredHarpHeads",
+ "sacredHarpHeadsMinor",
+ "section",
+ "semiGermanChords",
+ "setDefaultDurationToQuarter",
+ "shiftOff",
+ "shiftOn",
+ "shiftOnn",
+ "shiftOnnn",
+ "showSplitTiedTabNotes",
+ "showStaffSwitch",
+ "skipNC",
+ "skipNCs",
+ "slurDashed",
+ "slurDotted",
+ "slurDown",
+ "slurHalfDashed",
+ "slurHalfSolid",
+ "slurNeutral",
+ "slurSolid",
+ "slurUp",
+ "small",
+ "southernHarmonyHeads",
+ "southernHarmonyHeadsMinor",
+ "startAcciaccaturaMusic",
+ "startAppoggiaturaMusic",
+ "startGraceMusic",
+ "startMeasureCount",
+ "startMeasureSpanner",
+ "startSlashedGraceMusic",
+ "startStaff",
+ "stemDown",
+ "stemNeutral",
+ "stemUp",
+ "stopAcciaccaturaMusic",
+ "stopAppoggiaturaMusic",
+ "stopGraceMusic",
+ "stopMeasureCount",
+ "stopMeasureSpanner",
+ "stopSlashedGraceMusic",
+ "stopStaff",
+ "tabFullNotation",
+ "teeny",
+ "textLengthOff",
+ "textLengthOn",
+ "textSpannerDown",
+ "textSpannerNeutral",
+ "textSpannerUp",
+ "tieDashed",
+ "tieDotted",
+ "tieDown",
+ "tieHalfDashed",
+ "tieHalfSolid",
+ "tieNeutral",
+ "tieSolid",
+ "tieUp",
+ "tiny",
+ "tupletDown",
+ "tupletNeutral",
+ "tupletUp",
+ "unHideNotes",
+ "voiceFour",
+ "voiceFourStyle",
+ "voiceNeutralStyle",
+ "voiceOne",
+ "voiceOneStyle",
+ "voiceThree",
+ "voiceThreeStyle",
+ "voiceTwo",
+ "voiceTwoStyle",
+ "walkerHeads",
+ "walkerHeadsMinor",
+ "xNotesOff",
+ "xNotesOn",
+ "|",
+ "~",
+]
+
+markup_commands = [
+ "abs-fontsize",
+ "accidental",
+ "align-on-other",
+ "arrow-head",
+ "auto-footnote",
+ "backslashed-digit",
+ "beam",
+ "bold",
+ "box",
+ "bracket",
+ "caps",
+ "center-align",
+ "center-column",
+ "char",
+ "circle",
+ "coda",
+ "column",
+ "column-lines",
+ "combine",
+ "compound-meter",
+ "concat",
+ "conditional-circle-markup",
+ "customTabClef",
+ "dir-column",
+ "discant",
+ "doubleflat",
+ "doublesharp",
+ "draw-circle",
+ "draw-dashed-line",
+ "draw-dotted-line",
+ "draw-hline",
+ "draw-line",
+ "draw-squiggle-line",
+ "dynamic",
+ "ellipse",
+ "epsfile",
+ "eyeglasses",
+ "fermata",
+ "fill-line",
+ "fill-with-pattern",
+ "filled-box",
+ "finger",
+ "first-visible",
+ "flat",
+ "fontCaps",
+ "fontsize",
+ "footnote",
+ "fraction",
+ "freeBass",
+ "fret-diagram",
+ "fret-diagram-terse",
+ "fret-diagram-verbose",
+ "fromproperty",
+ "general-align",
+ "halign",
+ "harp-pedal",
+ "hbracket",
+ "hcenter-in",
+ "hspace",
+ "huge",
+ "if",
+ "italic",
+ "justified-lines",
+ "justify",
+ "justify-field",
+ "justify-line",
+ "justify-string",
+ "large",
+ "larger",
+ "left-align",
+ "left-brace",
+ "left-column",
+ "line",
+ "lookup",
+ "lower",
+ "magnify",
+ "map-markup-commands",
+ "markalphabet",
+ "markletter",
+ "markup",
+ "markuplist",
+ "medium",
+ "multi-measure-rest-by-number",
+ "musicglyph",
+ "natural",
+ "normal-size-sub",
+ "normal-size-super",
+ "normal-text",
+ "normalsize",
+ "note",
+ "note-by-number",
+ "null",
+ "number",
+ "on-the-fly",
+ "oval",
+ "overlay",
+ "override",
+ "override-lines",
+ "overtie",
+ "pad-around",
+ "pad-markup",
+ "pad-to-box",
+ "pad-x",
+ "page-link",
+ "page-ref",
+ "parenthesize",
+ "path",
+ "pattern",
+ "polygon",
+ "postscript",
+ "property-recursive",
+ "put-adjacent",
+ "raise",
+ "replace",
+ "rest",
+ "rest-by-number",
+ "rhythm",
+ "right-align",
+ "right-brace",
+ "right-column",
+ "roman",
+ "rotate",
+ "rounded-box",
+ "sans",
+ "scale",
+ "score",
+ "score-lines",
+ "segno",
+ "semiflat",
+ "semisharp",
+ "sesquiflat",
+ "sesquisharp",
+ "sharp",
+ "simple",
+ "slashed-digit",
+ "small",
+ "smallCaps",
+ "smaller",
+ "stdBass",
+ "stdBassIV",
+ "stdBassV",
+ "stdBassVI",
+ "stencil",
+ "string-lines",
+ "strut",
+ "sub",
+ "super",
+ "table",
+ "table-of-contents",
+ "teeny",
+ "text",
+ "tie",
+ "tied-lyric",
+ "tiny",
+ "translate",
+ "translate-scaled",
+ "transparent",
+ "triangle",
+ "typewriter",
+ "underline",
+ "undertie",
+ "unless",
+ "upright",
+ "varcoda",
+ "vcenter",
+ "verbatim-file",
+ "vspace",
+ "whiteout",
+ "with-color",
+ "with-dimension",
+ "with-dimension-from",
+ "with-dimensions",
+ "with-dimensions-from",
+ "with-link",
+ "with-outline",
+ "with-string-transformer",
+ "with-true-dimension",
+ "with-true-dimensions",
+ "with-url",
+ "woodwind-diagram",
+ "wordwrap",
+ "wordwrap-field",
+ "wordwrap-internal",
+ "wordwrap-lines",
+ "wordwrap-string",
+ "wordwrap-string-internal",
+]
+
+grobs = [
+ "Accidental",
+ "AccidentalCautionary",
+ "AccidentalPlacement",
+ "AccidentalSuggestion",
+ "Ambitus",
+ "AmbitusAccidental",
+ "AmbitusLine",
+ "AmbitusNoteHead",
+ "Arpeggio",
+ "BalloonText",
+ "BarLine",
+ "BarNumber",
+ "BassFigure",
+ "BassFigureAlignment",
+ "BassFigureAlignmentPositioning",
+ "BassFigureBracket",
+ "BassFigureContinuation",
+ "BassFigureLine",
+ "Beam",
+ "BendAfter",
+ "BendSpanner",
+ "BreakAlignGroup",
+ "BreakAlignment",
+ "BreathingSign",
+ "CenteredBarNumber",
+ "CenteredBarNumberLineSpanner",
+ "ChordName",
+ "ChordSquare",
+ "Clef",
+ "ClefModifier",
+ "ClusterSpanner",
+ "ClusterSpannerBeacon",
+ "CodaMark",
+ "CombineTextScript",
+ "ControlPoint",
+ "ControlPolygon",
+ "CueClef",
+ "CueEndClef",
+ "Custos",
+ "DotColumn",
+ "Dots",
+ "DoublePercentRepeat",
+ "DoublePercentRepeatCounter",
+ "DoubleRepeatSlash",
+ "DurationLine",
+ "DynamicLineSpanner",
+ "DynamicText",
+ "DynamicTextSpanner",
+ "Episema",
+ "FingerGlideSpanner",
+ "Fingering",
+ "FingeringColumn",
+ "Flag",
+ "Footnote",
+ "FretBoard",
+ "Glissando",
+ "GraceSpacing",
+ "GridChordName",
+ "GridLine",
+ "GridPoint",
+ "Hairpin",
+ "HorizontalBracket",
+ "HorizontalBracketText",
+ "InstrumentName",
+ "InstrumentSwitch",
+ "JumpScript",
+ "KeyCancellation",
+ "KeySignature",
+ "KievanLigature",
+ "LaissezVibrerTie",
+ "LaissezVibrerTieColumn",
+ "LedgerLineSpanner",
+ "LeftEdge",
+ "LigatureBracket",
+ "LyricExtender",
+ "LyricHyphen",
+ "LyricRepeatCount",
+ "LyricSpace",
+ "LyricText",
+ "MeasureCounter",
+ "MeasureGrouping",
+ "MeasureSpanner",
+ "MelodyItem",
+ "MensuralLigature",
+ "MetronomeMark",
+ "MultiMeasureRest",
+ "MultiMeasureRestNumber",
+ "MultiMeasureRestScript",
+ "MultiMeasureRestText",
+ "NonMusicalPaperColumn",
+ "NoteCollision",
+ "NoteColumn",
+ "NoteHead",
+ "NoteName",
+ "NoteSpacing",
+ "OttavaBracket",
+ "PaperColumn",
+ "Parentheses",
+ "PercentRepeat",
+ "PercentRepeatCounter",
+ "PhrasingSlur",
+ "PianoPedalBracket",
+ "RehearsalMark",
+ "RepeatSlash",
+ "RepeatTie",
+ "RepeatTieColumn",
+ "Rest",
+ "RestCollision",
+ "Script",
+ "ScriptColumn",
+ "ScriptRow",
+ "SectionLabel",
+ "SegnoMark",
+ "SignumRepetitionis",
+ "Slur",
+ "SostenutoPedal",
+ "SostenutoPedalLineSpanner",
+ "SpacingSpanner",
+ "SpanBar",
+ "SpanBarStub",
+ "StaffEllipsis",
+ "StaffGrouper",
+ "StaffSpacing",
+ "StaffSymbol",
+ "StanzaNumber",
+ "Stem",
+ "StemStub",
+ "StemTremolo",
+ "StringNumber",
+ "StrokeFinger",
+ "SustainPedal",
+ "SustainPedalLineSpanner",
+ "System",
+ "SystemStartBar",
+ "SystemStartBrace",
+ "SystemStartBracket",
+ "SystemStartSquare",
+ "TabNoteHead",
+ "TextScript",
+ "TextSpanner",
+ "Tie",
+ "TieColumn",
+ "TimeSignature",
+ "TrillPitchAccidental",
+ "TrillPitchGroup",
+ "TrillPitchHead",
+ "TrillPitchParentheses",
+ "TrillSpanner",
+ "TupletBracket",
+ "TupletNumber",
+ "UnaCordaPedal",
+ "UnaCordaPedalLineSpanner",
+ "VaticanaLigature",
+ "VerticalAlignment",
+ "VerticalAxisGroup",
+ "VoiceFollower",
+ "VoltaBracket",
+ "VoltaBracketSpanner",
+ "VowelTransition",
+]
+
+contexts = [
+ "ChoirStaff",
+ "ChordGrid",
+ "ChordGridScore",
+ "ChordNames",
+ "CueVoice",
+ "Devnull",
+ "DrumStaff",
+ "DrumVoice",
+ "Dynamics",
+ "FiguredBass",
+ "FretBoards",
+ "Global",
+ "GrandStaff",
+ "GregorianTranscriptionLyrics",
+ "GregorianTranscriptionStaff",
+ "GregorianTranscriptionVoice",
+ "InternalGregorianStaff",
+ "KievanStaff",
+ "KievanVoice",
+ "Lyrics",
+ "MensuralStaff",
+ "MensuralVoice",
+ "NoteNames",
+ "NullVoice",
+ "OneStaff",
+ "PetrucciStaff",
+ "PetrucciVoice",
+ "PianoStaff",
+ "RhythmicStaff",
+ "Score",
+ "Staff",
+ "StaffGroup",
+ "StandaloneRhythmScore",
+ "StandaloneRhythmStaff",
+ "StandaloneRhythmVoice",
+ "TabStaff",
+ "TabVoice",
+ "VaticanaLyrics",
+ "VaticanaStaff",
+ "VaticanaVoice",
+ "Voice",
+]
+
+translators = [
+ "Accidental_engraver",
+ "Alteration_glyph_engraver",
+ "Ambitus_engraver",
+ "Arpeggio_engraver",
+ "Auto_beam_engraver",
+ "Axis_group_engraver",
+ "Balloon_engraver",
+ "Bar_engraver",
+ "Bar_number_engraver",
+ "Beam_collision_engraver",
+ "Beam_engraver",
+ "Beam_performer",
+ "Beat_engraver",
+ "Beat_performer",
+ "Bend_engraver",
+ "Bend_spanner_engraver",
+ "Break_align_engraver",
+ "Breathing_sign_engraver",
+ "Centered_bar_number_align_engraver",
+ "Chord_name_engraver",
+ "Chord_square_engraver",
+ "Chord_tremolo_engraver",
+ "Clef_engraver",
+ "Cluster_spanner_engraver",
+ "Collision_engraver",
+ "Completion_heads_engraver",
+ "Completion_rest_engraver",
+ "Concurrent_hairpin_engraver",
+ "Control_track_performer",
+ "Cue_clef_engraver",
+ "Current_chord_text_engraver",
+ "Custos_engraver",
+ "Dot_column_engraver",
+ "Dots_engraver",
+ "Double_percent_repeat_engraver",
+ "Drum_note_performer",
+ "Drum_notes_engraver",
+ "Duration_line_engraver",
+ "Dynamic_align_engraver",
+ "Dynamic_engraver",
+ "Dynamic_performer",
+ "Episema_engraver",
+ "Extender_engraver",
+ "Figured_bass_engraver",
+ "Figured_bass_position_engraver",
+ "Finger_glide_engraver",
+ "Fingering_column_engraver",
+ "Fingering_engraver",
+ "Font_size_engraver",
+ "Footnote_engraver",
+ "Forbid_line_break_engraver",
+ "Fretboard_engraver",
+ "Glissando_engraver",
+ "Grace_auto_beam_engraver",
+ "Grace_beam_engraver",
+ "Grace_engraver",
+ "Grace_spacing_engraver",
+ "Grid_chord_name_engraver",
+ "Grid_line_span_engraver",
+ "Grid_point_engraver",
+ "Grob_pq_engraver",
+ "Horizontal_bracket_engraver",
+ "Hyphen_engraver",
+ "Instrument_name_engraver",
+ "Instrument_switch_engraver",
+ "Jump_engraver",
+ "Keep_alive_together_engraver",
+ "Key_engraver",
+ "Key_performer",
+ "Kievan_ligature_engraver",
+ "Laissez_vibrer_engraver",
+ "Ledger_line_engraver",
+ "Ligature_bracket_engraver",
+ "Lyric_engraver",
+ "Lyric_performer",
+ "Lyric_repeat_count_engraver",
+ "Mark_engraver",
+ "Mark_performer",
+ "Mark_tracking_translator",
+ "Measure_counter_engraver",
+ "Measure_grouping_engraver",
+ "Measure_spanner_engraver",
+ "Melody_engraver",
+ "Mensural_ligature_engraver",
+ "Merge_mmrest_numbers_engraver",
+ "Merge_rests_engraver",
+ "Metronome_mark_engraver",
+ "Midi_control_change_performer",
+ "Multi_measure_rest_engraver",
+ "New_fingering_engraver",
+ "Note_head_line_engraver",
+ "Note_heads_engraver",
+ "Note_name_engraver",
+ "Note_performer",
+ "Note_spacing_engraver",
+ "Ottava_spanner_engraver",
+ "Output_property_engraver",
+ "Page_turn_engraver",
+ "Paper_column_engraver",
+ "Parenthesis_engraver",
+ "Part_combine_engraver",
+ "Percent_repeat_engraver",
+ "Phrasing_slur_engraver",
+ "Piano_pedal_align_engraver",
+ "Piano_pedal_engraver",
+ "Piano_pedal_performer",
+ "Pitch_squash_engraver",
+ "Pitched_trill_engraver",
+ "Pure_from_neighbor_engraver",
+ "Repeat_acknowledge_engraver",
+ "Repeat_tie_engraver",
+ "Rest_collision_engraver",
+ "Rest_engraver",
+ "Rhythmic_column_engraver",
+ "Script_column_engraver",
+ "Script_engraver",
+ "Script_row_engraver",
+ "Separating_line_group_engraver",
+ "Show_control_points_engraver",
+ "Signum_repetitionis_engraver",
+ "Skip_typesetting_engraver",
+ "Slash_repeat_engraver",
+ "Slur_engraver",
+ "Slur_performer",
+ "Spacing_engraver",
+ "Span_arpeggio_engraver",
+ "Span_bar_engraver",
+ "Span_bar_stub_engraver",
+ "Span_stem_engraver",
+ "Spanner_break_forbid_engraver",
+ "Spanner_tracking_engraver",
+ "Staff_collecting_engraver",
+ "Staff_performer",
+ "Staff_symbol_engraver",
+ "Stanza_number_align_engraver",
+ "Stanza_number_engraver",
+ "Stem_engraver",
+ "System_start_delimiter_engraver",
+ "Tab_note_heads_engraver",
+ "Tab_staff_symbol_engraver",
+ "Tab_tie_follow_engraver",
+ "Tempo_performer",
+ "Text_engraver",
+ "Text_spanner_engraver",
+ "Tie_engraver",
+ "Tie_performer",
+ "Time_signature_engraver",
+ "Time_signature_performer",
+ "Timing_translator",
+ "Trill_spanner_engraver",
+ "Tuplet_engraver",
+ "Tweak_engraver",
+ "Vaticana_ligature_engraver",
+ "Vertical_align_engraver",
+ "Volta_engraver",
+]
+
+scheme_functions = [
+ "!=",
+ "*location*",
+ "*parser*",
+ "Alteration_glyph_engraver",
+ "Beat_performer",
+ "Bend_spanner_engraver",
+ "Breathing_sign_engraver",
+ "Centered_bar_number_align_engraver",
+ "Chord_name_engraver",
+ "Chord_square_engraver",
+ "Current_chord_text_engraver",
+ "Duration_line_engraver",
+ "Finger_glide_engraver",
+ "G_",
+ "Grid_chord_name_engraver",
+ "Lyric_repeat_count_engraver",
+ "Measure_counter_engraver",
+ "Measure_spanner_engraver",
+ "Merge_mmrest_numbers_engraver",
+ "Merge_rests_engraver",
+ "Show_control_points_engraver",
+ "Signum_repetitionis_engraver",
+ "Skip_typesetting_engraver",
+ "Span_stem_engraver",
+ "Spanner_tracking_engraver",
+ "_i",
+ "abs-fontsize-markup",
+ "accidental->markup",
+ "accidental->markup-italian",
+ "accidental-interface::calc-alteration",
+ "accidental-invalid?",
+ "accidental-markup",
+ "add-bar-glyph-print-procedure",
+ "add-font",
+ "add-grace-property",
+ "add-music",
+ "add-music-fonts",
+ "add-new-clef",
+ "add-pango-fonts",
+ "add-point",
+ "add-quotable",
+ "add-score",
+ "add-simple-time-signature-style",
+ "add-stroke-glyph",
+ "add-stroke-straight",
+ "add-text",
+ "adjust-slash-stencil",
+ "align-on-other-markup",
+ "aligned-text-stencil-function",
+ "alist->hash-table",
+ "alist<?",
+ "alist?",
+ "all-bar-numbers-visible",
+ "all-equal?",
+ "all-repeat-counts-visible",
+ "allow-volta-hook",
+ "alteration->text-accidental-markup",
+ "alterations-in-key",
+ "ambitus-line::calc-gap",
+ "ambitus::print",
+ "analyse-spanner-states",
+ "ancestor-lookup-initialize",
+ "angle-0-2pi",
+ "angle-0-360",
+ "annotate-spacing-spec",
+ "annotate-y-interval",
+ "any-mmrest-events",
+ "apply-durations",
+ "apply-group-draw-rule-series",
+ "arrow-head-markup",
+ "arrow-stencil",
+ "arrow-stencil-maker",
+ "assemble-stencils",
+ "assoc-get",
+ "assoc-keys",
+ "assoc-values",
+ "aug-modifier",
+ "auto-footnote-markup",
+ "average",
+ "b",
+ "backslashed-digit-markup",
+ "bar-line::bar-y-extent",
+ "bar-line::calc-blot",
+ "bar-line::calc-break-visibility",
+ "bar-line::calc-glyph-name",
+ "bar-line::calc-glyph-name-for-direction",
+ "bar-line::compound-bar-line",
+ "bar-line::draw-filled-box",
+ "bar-line::widen-bar-extent-on-span",
+ "base-length",
+ "bass-clarinet-rh-ees-key-stencil",
+ "bassoon-bend-info-maker",
+ "bassoon-cc-one-key-stencil",
+ "bassoon-lh-a-flick-key-stencil",
+ "bassoon-lh-c-flick-key-stencil",
+ "bassoon-lh-cis-key-stencil",
+ "bassoon-lh-d-flick-key-stencil",
+ "bassoon-lh-ees-key-stencil",
+ "bassoon-lh-he-key-stencil",
+ "bassoon-lh-hees-key-stencil",
+ "bassoon-lh-lb-key-stencil",
+ "bassoon-lh-lbes-key-stencil",
+ "bassoon-lh-lc-key-stencil",
+ "bassoon-lh-ld-key-stencil",
+ "bassoon-lh-thumb-cis-key-stencil",
+ "bassoon-lh-whisper-key-stencil",
+ "bassoon-midline-rule",
+ "bassoon-rh-bes-key-stencil",
+ "bassoon-rh-cis-key-stencil",
+ "bassoon-rh-f-key-stencil",
+ "bassoon-rh-fis-key-stencil",
+ "bassoon-rh-gis-key-stencil",
+ "bassoon-rh-thumb-bes-key-stencil",
+ "bassoon-rh-thumb-e-key-stencil",
+ "bassoon-rh-thumb-fis-key-stencil",
+ "bassoon-rh-thumb-gis-key-stencil",
+ "bassoon-uber-key-stencil",
+ "beam-exceptions",
+ "beam-markup",
+ "beam::align-with-broken-parts",
+ "beam::get-kievan-positions",
+ "beam::get-kievan-quantized-positions",
+ "beam::place-broken-parts-individually",
+ "beam::slope-like-broken-parts",
+ "beat-grouping-internal",
+ "beat-structure",
+ "bend-spanner::print",
+ "bend::arrow-head-stencil",
+ "bend::calc-bend-x-begin",
+ "bend::calc-bend-x-end",
+ "bend::calc-y-coordinates",
+ "bend::draw-curves",
+ "bend::make-line-curve-stencil",
+ "bend::print",
+ "bend::remove-certain-tab-note-heads",
+ "bend::target-cautionary",
+ "bend::text-stencil",
+ "bend::text-string",
+ "bezier-head-for-stencil",
+ "binary-search",
+ "bold-markup",
+ "book-first-page",
+ "boolean-or-number?",
+ "boolean-or-symbol?",
+ "bounding-note-heads-pitches",
+ "box-grob-stencil",
+ "box-markup",
+ "box-stencil",
+ "bracket-markup",
+ "bracketify-stencil",
+ "break-alignable-interface::self-alignment-of-anchor",
+ "break-alignable-interface::self-alignment-opposite-of-anchor",
+ "break-alignment-list",
+ "breathe::midi-length",
+ "buildflag",
+ "cached-file-contents",
+ "calc-harmonic-pitch",
+ "calc-line-thickness",
+ "calc-repeat-slash-count",
+ "calculate-complex-compound-time",
+ "calculate-compound-base-beat",
+ "calculate-compound-base-beat-full",
+ "calculate-compound-beat-grouping",
+ "calculate-compound-measure-length",
+ "calculate-time-fraction",
+ "call-after-session",
+ "caps-markup",
+ "car-or-identity",
+ "car<",
+ "car<=",
+ "cdr-or-identity",
+ "center-align-markup",
+ "center-column-markup",
+ "centered-spanner-interface::calc-x-offset",
+ "centered-stencil",
+ "chain-assoc-get",
+ "change-pitches",
+ "char-markup",
+ "cheap-list?",
+ "cheap-markup?",
+ "check-beam-quant",
+ "check-beam-slope-sign",
+ "check-broken-spanner",
+ "check-context-path",
+ "check-division-alist",
+ "check-for-annotation",
+ "check-for-replacement",
+ "check-grob-path",
+ "check-music-path",
+ "check-pitch-against-signature",
+ "check-quant-callbacks",
+ "check-slope-callbacks",
+ "chord-name->german-markup",
+ "chord-name->italian-markup",
+ "chord-square::height",
+ "chord-square::print",
+ "chord-square::width",
+ "circle-markup",
+ "circle-stencil",
+ "clarinet-lh-R-key-stencil",
+ "clarinet-lh-a-key-stencil",
+ "clarinet-lh-cis-key-stencil",
+ "clarinet-lh-d-key-stencil",
+ "clarinet-lh-e-key-stencil",
+ "clarinet-lh-ees-key-stencil",
+ "clarinet-lh-f-key-stencil",
+ "clarinet-lh-fis-key-stencil",
+ "clarinet-lh-gis-key-stencil",
+ "clarinet-lh-thumb-key-stencil",
+ "clarinet-rh-b-key-stencil",
+ "clarinet-rh-d-key-stencil",
+ "clarinet-rh-e-key-stencil",
+ "clarinet-rh-f-key-stencil",
+ "clarinet-rh-fis-key-stencil",
+ "clarinet-rh-four-key-stencil",
+ "clarinet-rh-gis-key-stencil",
+ "clarinet-rh-low-c-key-stencil",
+ "clarinet-rh-low-cis-key-stencil",
+ "clarinet-rh-low-d-key-stencil",
+ "clarinet-rh-one-key-stencil",
+ "clarinet-rh-three-key-stencil",
+ "clarinet-rh-two-key-stencil",
+ "clef-transposition-markup",
+ "clef::print-modern-tab-if-set",
+ "clip-systems-to-region-stencils",
+ "clipped-systems-stencils",
+ "close-enough?",
+ "close-port-rename",
+ "coda-markup",
+ "collect-book-music-for-book",
+ "collect-bookpart-for-book",
+ "collect-music-aux",
+ "collect-music-for-book",
+ "collect-scores-for-book",
+ "color?",
+ "column-circle-stencil",
+ "column-lines-markup-list",
+ "column-markup",
+ "combine-markup",
+ "comparable-note-events",
+ "comparator-from-key",
+ "compile-all-markup-args",
+ "compile-all-markup-expressions",
+ "compile-markup-arg",
+ "compile-markup-expression",
+ "completize-formats",
+ "completize-grob-entry",
+ "compound-meter-markup",
+ "concat-markup",
+ "conditional-circle-markup-markup",
+ "conditional-kern-before",
+ "conditional-string-capitalize",
+ "configuration",
+ "cons-fret",
+ "constante-hairpin",
+ "construct-chord-elements",
+ "context-defs-from-music",
+ "context-mod-from-music",
+ "context-spec-music",
+ "control-point::calc-offset",
+ "control-polygon::calc-text",
+ "coord-axis",
+ "coord-rotate",
+ "coord-rotated",
+ "coord-scale",
+ "coord-translate",
+ "coord-x",
+ "coord-y",
+ "copy-binary-file",
+ "copy-repeat-chord",
+ "count-list",
+ "create-file-exclusive",
+ "create-fretboard",
+ "create-glyph-flag",
+ "cross-staff-connect",
+ "css-color",
+ "cue-substitute",
+ "current-or-previous-voice-states",
+ "customTabClef-markup",
+ "cyclic-base-value",
+ "debugf",
+ "def-grace-function",
+ "default-auto-beam-check",
+ "default-flag",
+ "default-paren-color",
+ "define-bar-line",
+ "define-event-class",
+ "define-event-function",
+ "define-fonts",
+ "define-grob-property",
+ "define-internal-grob-property",
+ "define-markup-command",
+ "define-markup-command-internal",
+ "define-markup-list-command",
+ "define-music-function",
+ "define-scheme-function",
+ "define-session",
+ "define-session-public",
+ "define-syntax-function",
+ "define-syntax-public",
+ "define-syntax-rule-public",
+ "define-tag-group",
+ "define-void-function",
+ "degree-first-true",
+ "degrees->radians",
+ "descend-to-context",
+ "determine-frets",
+ "determine-split-list",
+ "determine-string-fret-finger",
+ "dim-modifier",
+ "dimension-arrows",
+ "dir-basename",
+ "dir-column-markup",
+ "display-lily-music",
+ "display-music",
+ "display-scheme-music",
+ "dodecaphonic-no-repeat-rule",
+ "done?",
+ "dot-has-color",
+ "dot-is-inverted",
+ "dot-is-parenthesized",
+ "dots::calc-dot-count",
+ "dots::calc-staff-position",
+ "doubleflat-markup",
+ "doublesharp-markup",
+ "draw-circle-markup",
+ "draw-dashed-line-markup",
+ "draw-dotted-line-markup",
+ "draw-hline-markup",
+ "draw-line-markup",
+ "draw-squiggle-line-markup",
+ "dump-zombies",
+ "duration",
+ "duration-dot-factor",
+ "duration-length",
+ "duration-line::calc",
+ "duration-line::calc-thickness",
+ "duration-line::print",
+ "duration-log-factor",
+ "duration-of-note",
+ "duration-or-music?",
+ "duration-visual",
+ "duration-visual-length",
+ "dynamic-markup",
+ "dynamic-text-spanner::before-line-breaking",
+ "elbowed-hairpin",
+ "ellipse-markup",
+ "ellipse-radius",
+ "ellipse-stencil",
+ "empty-music",
+ "end-broken-spanner?",
+ "entry-greater-than-x?",
+ "eps-file->stencil",
+ "epsfile-markup",
+ "eval-carefully",
+ "event-cause",
+ "event-chord-notes",
+ "event-chord-pitches",
+ "event-chord-reduce",
+ "event-chord-wrap!",
+ "event-class-cons",
+ "event-has-articulation?",
+ "events",
+ "every-nth-bar-number-visible",
+ "every-nth-repeat-count-visible",
+ "exact-rational?",
+ "expand-repeat-chords!",
+ "expand-repeat-notes!",
+ "extent-combine",
+ "extract-alteration",
+ "extract-beam-exceptions",
+ "extract-music",
+ "extract-named-music",
+ "extract-typed-music",
+ "eyeglasses-markup",
+ "fermata-markup",
+ "fill-line-markup",
+ "fill-with-pattern-markup",
+ "filled-box-markup",
+ "filtered-map",
+ "find-named-props",
+ "find-pitch-entry",
+ "find-value-to-offset",
+ "finger-glide::print",
+ "finger-markup",
+ "fingering::calc-text",
+ "first-assoc",
+ "first-bar-number-invisible",
+ "first-bar-number-invisible-and-no-parenthesized-bar-numbers",
+ "first-bar-number-invisible-save-broken-bars",
+ "first-broken-spanner?",
+ "first-member",
+ "first-visible-markup",
+ "flared-hairpin",
+ "flat-flag",
+ "flat-markup",
+ "flatten-alist",
+ "flatten-list",
+ "flip-stencil",
+ "flute-lh-b-key-stencil",
+ "flute-lh-bes-key-stencil",
+ "flute-lh-gis-key-stencil",
+ "flute-lh-gis-rh-bes-key-stencil",
+ "flute-rh-b-key-stencil",
+ "flute-rh-bes-key-stencil",
+ "flute-rh-c-key-stencil",
+ "flute-rh-cis-key-stencil",
+ "flute-rh-d-key-stencil",
+ "flute-rh-dis-key-stencil",
+ "flute-rh-ees-key-stencil",
+ "flute-rh-gz-key-stencil",
+ "fold-some-music",
+ "font-children",
+ "font-default",
+ "font-name-split",
+ "font-name-style",
+ "font-qualifier",
+ "fontCaps-markup",
+ "fontsize-markup",
+ "footnote-markup",
+ "for-some-music",
+ "forced-configuration",
+ "format",
+ "format-bass-figure",
+ "format-coda-mark",
+ "format-compound-time",
+ "format-dal-segno-text",
+ "format-dal-segno-text-brief",
+ "format-mark-alphabet",
+ "format-mark-barnumbers",
+ "format-mark-box-alphabet",
+ "format-mark-box-barnumbers",
+ "format-mark-box-letters",
+ "format-mark-box-numbers",
+ "format-mark-circle-alphabet",
+ "format-mark-circle-barnumbers",
+ "format-mark-circle-letters",
+ "format-mark-circle-numbers",
+ "format-mark-generic",
+ "format-mark-letters",
+ "format-mark-numbers",
+ "format-metronome-markup",
+ "format-segno-mark",
+ "format-segno-mark-considering-bar-lines",
+ "format-sign-with-number",
+ "format-time-element",
+ "format-time-fraction",
+ "format-time-list",
+ "format-time-numerator",
+ "format-varcoda-mark",
+ "fraction->moment",
+ "fraction-markup",
+ "fraction?",
+ "fret->pitch",
+ "fret-board::calc-stencil",
+ "fret-count",
+ "fret-diagram-markup",
+ "fret-diagram-terse-markup",
+ "fret-diagram-verbose-markup",
+ "fret-letter-tablature-format",
+ "fret-number-tablature-format",
+ "fret-number-tablature-format-banjo",
+ "fret-parse-definition-string",
+ "fret-parse-marking-list",
+ "fret-parse-terse-definition-string",
+ "fromproperty-markup",
+ "function-chain",
+ "g",
+ "g-lookup-font",
+ "general-align-markup",
+ "general-column",
+ "generate-bassoon-family-entry",
+ "generate-clarinet-family-entry",
+ "generate-crop-stencil",
+ "generate-flute-family-entry",
+ "generate-oboe-family-entry",
+ "generate-preview-stencil",
+ "generate-saxophone-family-entry",
+ "generate-system-stencils",
+ "generate-tin-whistle-family-entry",
+ "get-bound-note-heads",
+ "get-chord-shape",
+ "get-current-filename",
+ "get-current-suffix",
+ "get-fill-space",
+ "get-key",
+ "get-named-spreadsheet-column",
+ "get-next-unique-voice-name",
+ "get-numeric-from-key",
+ "get-outfile-name",
+ "get-postscript-bbox",
+ "get-quarter-diffs",
+ "get-setting",
+ "get-slope-offset",
+ "get-span-glyph",
+ "get-spreadsheet-column",
+ "get-step",
+ "get-sub-list",
+ "get-top-most-tab-head",
+ "get-tweakable-music",
+ "get-woodwind-key-list",
+ "glissando::calc-tab-extra-dy",
+ "glissando::draw-tab-glissando",
+ "glyph->stencil",
+ "glyph-flag",
+ "grace-spacing::calc-shortest-duration",
+ "gray-colorize",
+ "grid-chord-name::calc-X-offset",
+ "grid-chord-name::calc-Y-offset",
+ "grid-chord-name::calc-offset-on-axis",
+ "grob-interpret-markup",
+ "grob-list?",
+ "grob-transformer",
+ "grob::all-objects",
+ "grob::calc-property-by-copy",
+ "grob::compose-function",
+ "grob::display-objects",
+ "grob::has-interface",
+ "grob::inherit-parent-property",
+ "grob::is-live?",
+ "grob::name",
+ "grob::objects-from-interface",
+ "grob::offset-function",
+ "grob::relay-other-property",
+ "grob::rhythmic-location",
+ "grob::show-skylines-if-debug-skylines-set",
+ "grob::unpure-Y-extent-from-stencil",
+ "grob::when",
+ "group-automate-rule",
+ "group-draw-rule",
+ "group-extra-offset-rule",
+ "gs-cmd-args",
+ "gs-safe-run",
+ "hairpin::calc-grow-direction",
+ "halign-markup",
+ "harp-pedal-check",
+ "harp-pedal-info",
+ "harp-pedal-markup",
+ "harp-pedals-parse-string",
+ "has-at-least-two?",
+ "has-one-or-less?",
+ "hash-table->alist",
+ "hbracket-markup",
+ "hcenter-in-markup",
+ "header-to-file",
+ "headers-property-alist-chain",
+ "hook-stencil",
+ "horizontal-slash-interval",
+ "hspace-markup",
+ "huge-markup",
+ "if-markup",
+ "ignatzek-chord-names",
+ "index-cell",
+ "index-or-markup?",
+ "index?",
+ "insert-markups",
+ "internal-set-paper-size",
+ "interpret-markup",
+ "interpret-markup-list",
+ "interval-bound",
+ "interval-center",
+ "interval-contains?",
+ "interval-empty?",
+ "interval-end",
+ "interval-index",
+ "interval-intersection",
+ "interval-length",
+ "interval-sane?",
+ "interval-scale",
+ "interval-start",
+ "interval-union",
+ "interval-widen",
+ "invalidate-alterations",
+ "inverter-factory",
+ "is-absolute?",
+ "is-square?",
+ "italic-markup",
+ "item::extra-spacing-height-including-staff",
+ "justified-lines-markup-list",
+ "justify-field-markup",
+ "justify-line-helper",
+ "justify-line-markup",
+ "justify-markup",
+ "justify-string-markup",
+ "key-crawler",
+ "key-entry-alteration",
+ "key-entry-bar-number",
+ "key-entry-end-mom",
+ "key-entry-notename",
+ "key-entry-octave",
+ "key-fill-translate",
+ "key-list-or-music?",
+ "key-list-or-symbol?",
+ "key-list?",
+ "key-signature-interface::alteration-position",
+ "key-signature-interface::alteration-positions",
+ "key?",
+ "keyword->make-markup",
+ "large-markup",
+ "larger-markup",
+ "layout-blot-diameter",
+ "layout-extract-page-properties",
+ "layout-line-thickness",
+ "layout-set-absolute-staff-size",
+ "layout-set-absolute-staff-size-in-module",
+ "layout-set-staff-size",
+ "left-align-markup",
+ "left-brace-markup",
+ "left-column-markup",
+ "lexicographic-list-compare?",
+ "lh-woodwind-text-stencil",
+ "lilypond-all",
+ "lilypond-file",
+ "lilypond-main",
+ "lilypond-version",
+ "lilypond-version-outdated?",
+ "line-markup",
+ "list-all-possible-keys",
+ "list-all-possible-keys-verbose",
+ "list-element-index",
+ "list-insert-separator",
+ "list-join",
+ "listener->once-listener",
+ "little-elliptical-key-stencil",
+ "long-midline-stencil",
+ "lookup-font",
+ "lookup-markup",
+ "lookup-markup-command",
+ "lookup-markup-command-aux",
+ "lookup-markup-list-command",
+ "lookup-paper-name",
+ "low-bass-clarinet-rh-ees-key-stencil",
+ "lower-markup",
+ "ly-getcwd",
+ "ly-type?",
+ "ly:accidental-interface::height",
+ "ly:accidental-interface::horizontal-skylines",
+ "ly:accidental-interface::print",
+ "ly:accidental-interface::remove-tied",
+ "ly:accidental-placement::calc-positioning-done",
+ "ly:add-context-mod",
+ "ly:add-interface",
+ "ly:add-listener",
+ "ly:add-option",
+ "ly:align-interface::align-to-ideal-distances",
+ "ly:align-interface::align-to-minimum-distances",
+ "ly:all-grob-interfaces",
+ "ly:all-options",
+ "ly:all-output-backend-commands",
+ "ly:all-stencil-commands",
+ "ly:all-stencil-expressions",
+ "ly:alternative-sequence-iterator::constructor",
+ "ly:angle",
+ "ly:apply-context-iterator::constructor",
+ "ly:arpeggio::brew-chord-bracket",
+ "ly:arpeggio::brew-chord-slur",
+ "ly:arpeggio::calc-cross-staff",
+ "ly:arpeggio::calc-positions",
+ "ly:arpeggio::print",
+ "ly:arpeggio::pure-height",
+ "ly:arpeggio::width",
+ "ly:assoc-get",
+ "ly:axis-group-interface::add-element",
+ "ly:axis-group-interface::adjacent-pure-heights",
+ "ly:axis-group-interface::calc-pure-relevant-grobs",
+ "ly:axis-group-interface::calc-pure-staff-staff-spacing",
+ "ly:axis-group-interface::calc-pure-y-common",
+ "ly:axis-group-interface::calc-skylines",
+ "ly:axis-group-interface::calc-staff-staff-spacing",
+ "ly:axis-group-interface::calc-x-common",
+ "ly:axis-group-interface::calc-y-common",
+ "ly:axis-group-interface::combine-skylines",
+ "ly:axis-group-interface::height",
+ "ly:axis-group-interface::pure-height",
+ "ly:axis-group-interface::width",
+ "ly:balloon-interface::print",
+ "ly:balloon-interface::pure-height",
+ "ly:balloon-interface::remove-irrelevant-spanner",
+ "ly:balloon-interface::width",
+ "ly:bar-check-iterator::constructor",
+ "ly:bar-line::calc-anchor",
+ "ly:bar-line::calc-bar-extent",
+ "ly:bar-line::print",
+ "ly:basic-progress",
+ "ly:beam::calc-beam-segments",
+ "ly:beam::calc-beaming",
+ "ly:beam::calc-cross-staff",
+ "ly:beam::calc-direction",
+ "ly:beam::calc-normal-stems",
+ "ly:beam::calc-stem-shorten",
+ "ly:beam::calc-x-positions",
+ "ly:beam::print",
+ "ly:beam::pure-rest-collision-callback",
+ "ly:beam::quanting",
+ "ly:beam::rest-collision-callback",
+ "ly:beam::set-stem-lengths",
+ "ly:bezier-extent",
+ "ly:bezier-extract",
+ "ly:book-add-bookpart!",
+ "ly:book-add-score!",
+ "ly:book-book-parts",
+ "ly:book-header",
+ "ly:book-paper",
+ "ly:book-process",
+ "ly:book-process-to-systems",
+ "ly:book-scores",
+ "ly:book-set-header!",
+ "ly:book?",
+ "ly:bp",
+ "ly:bracket",
+ "ly:break-alignable-interface::find-parent",
+ "ly:break-alignable-interface::self-align-callback",
+ "ly:break-aligned-interface::calc-average-anchor",
+ "ly:break-aligned-interface::calc-break-visibility",
+ "ly:break-aligned-interface::calc-extent-aligned-anchor",
+ "ly:break-aligned-interface::calc-joint-anchor-alignment",
+ "ly:break-alignment-interface::calc-positioning-done",
+ "ly:breathing-sign::divisio-maior",
+ "ly:breathing-sign::divisio-maxima",
+ "ly:breathing-sign::divisio-minima",
+ "ly:breathing-sign::finalis",
+ "ly:breathing-sign::offset-callback",
+ "ly:breathing-sign::set-breath-properties",
+ "ly:broadcast",
+ "ly:cairo-output-stencil",
+ "ly:cairo-output-stencils",
+ "ly:calculated-sequential-music::length",
+ "ly:calculated-sequential-music::start",
+ "ly:camel-case->lisp-identifier",
+ "ly:chain-assoc-get",
+ "ly:change-iterator::constructor",
+ "ly:check-expected-warnings",
+ "ly:chord-name::after-line-breaking",
+ "ly:clef-modifier::calc-parent-alignment",
+ "ly:clef::calc-glyph-name",
+ "ly:clef::print",
+ "ly:cluster-beacon::height",
+ "ly:cluster::calc-cross-staff",
+ "ly:cluster::print",
+ "ly:cm",
+ "ly:command-line-code",
+ "ly:command-line-options",
+ "ly:connect-dispatchers",
+ "ly:context-current-moment",
+ "ly:context-def-lookup",
+ "ly:context-def-modify",
+ "ly:context-def?",
+ "ly:context-event-source",
+ "ly:context-events-below",
+ "ly:context-find",
+ "ly:context-grob-definition",
+ "ly:context-id",
+ "ly:context-matched-pop-property",
+ "ly:context-mod-apply!",
+ "ly:context-mod?",
+ "ly:context-name",
+ "ly:context-output-def",
+ "ly:context-parent",
+ "ly:context-property",
+ "ly:context-property-where-defined",
+ "ly:context-pushpop-property",
+ "ly:context-set-property!",
+ "ly:context-specced-music-iterator::constructor",
+ "ly:context-unset-property",
+ "ly:context?",
+ "ly:custos::print",
+ "ly:debug",
+ "ly:default-scale",
+ "ly:dimension?",
+ "ly:dir?",
+ "ly:directed",
+ "ly:disconnect-dispatchers",
+ "ly:dispatcher?",
+ "ly:dot-column::calc-positioning-done",
+ "ly:dots::print",
+ "ly:duration->string",
+ "ly:duration-compress",
+ "ly:duration-dot-count",
+ "ly:duration-factor",
+ "ly:duration-length",
+ "ly:duration-log",
+ "ly:duration-scale",
+ "ly:duration::less?",
+ "ly:duration<?",
+ "ly:duration?",
+ "ly:effective-prefix",
+ "ly:enclosing-bracket::print",
+ "ly:enclosing-bracket::width",
+ "ly:engraver-announce-end-grob",
+ "ly:engraver-make-grob",
+ "ly:engraver-make-item",
+ "ly:engraver-make-spanner",
+ "ly:engraver-make-sticky",
+ "ly:error",
+ "ly:event-chord-iterator::constructor",
+ "ly:event-deep-copy",
+ "ly:event-iterator::constructor",
+ "ly:event-property",
+ "ly:event-set-property!",
+ "ly:event-warning",
+ "ly:event?",
+ "ly:exit",
+ "ly:expect-warning",
+ "ly:extract-subfont-from-collection",
+ "ly:figured-bass-continuation::center-on-figures",
+ "ly:figured-bass-continuation::print",
+ "ly:find-file",
+ "ly:fine-iterator::constructor",
+ "ly:fingering-column::calc-positioning-done",
+ "ly:flag::calc-x-offset",
+ "ly:flag::calc-y-offset",
+ "ly:flag::glyph-name",
+ "ly:flag::print",
+ "ly:flag::pure-calc-y-offset",
+ "ly:flag::width",
+ "ly:font-config-add-directory",
+ "ly:font-config-add-font",
+ "ly:font-config-display-fonts",
+ "ly:font-config-get-font-file",
+ "ly:font-design-size",
+ "ly:font-file-name",
+ "ly:font-get-glyph",
+ "ly:font-glyph-name-to-charcode",
+ "ly:font-glyph-name-to-index",
+ "ly:font-index-to-charcode",
+ "ly:font-magnification",
+ "ly:font-metric?",
+ "ly:font-name",
+ "ly:font-sub-fonts",
+ "ly:format",
+ "ly:format-output",
+ "ly:generic-bound-extent",
+ "ly:get-all-function-documentation",
+ "ly:get-all-translators",
+ "ly:get-cff-offset",
+ "ly:get-context-mods",
+ "ly:get-font-format",
+ "ly:get-option",
+ "ly:get-spacing-spec",
+ "ly:grace-iterator::constructor",
+ "ly:grace-music::start-callback",
+ "ly:grid-line-interface::print",
+ "ly:grid-line-interface::width",
+ "ly:grob-alist-chain",
+ "ly:grob-array->list",
+ "ly:grob-array-length",
+ "ly:grob-array-ref",
+ "ly:grob-array?",
+ "ly:grob-basic-properties",
+ "ly:grob-chain-callback",
+ "ly:grob-common-refpoint",
+ "ly:grob-common-refpoint-of-array",
+ "ly:grob-default-font",
+ "ly:grob-extent",
+ "ly:grob-get-vertical-axis-group-index",
+ "ly:grob-interfaces",
+ "ly:grob-layout",
+ "ly:grob-list->grob-array",
+ "ly:grob-object",
+ "ly:grob-original",
+ "ly:grob-parent",
+ "ly:grob-pq<?",
+ "ly:grob-properties?",
+ "ly:grob-property",
+ "ly:grob-property-data",
+ "ly:grob-pure-height",
+ "ly:grob-pure-property",
+ "ly:grob-relative-coordinate",
+ "ly:grob-robust-relative-extent",
+ "ly:grob-script-priority-less",
+ "ly:grob-set-nested-property!",
+ "ly:grob-set-object!",
+ "ly:grob-set-parent!",
+ "ly:grob-set-property!",
+ "ly:grob-spanned-column-rank-interval",
+ "ly:grob-staff-position",
+ "ly:grob-suicide!",
+ "ly:grob-system",
+ "ly:grob-translate-axis!",
+ "ly:grob-vertical<?",
+ "ly:grob-warning",
+ "ly:grob::horizontal-skylines-from-element-stencils",
+ "ly:grob::horizontal-skylines-from-stencil",
+ "ly:grob::pure-horizontal-skylines-from-element-stencils",
+ "ly:grob::pure-simple-horizontal-skylines-from-extents",
+ "ly:grob::pure-simple-vertical-skylines-from-extents",
+ "ly:grob::pure-stencil-height",
+ "ly:grob::pure-vertical-skylines-from-element-stencils",
+ "ly:grob::simple-horizontal-skylines-from-extents",
+ "ly:grob::simple-vertical-skylines-from-extents",
+ "ly:grob::stencil-height",
+ "ly:grob::stencil-width",
+ "ly:grob::vertical-skylines-from-element-stencils",
+ "ly:grob::vertical-skylines-from-stencil",
+ "ly:grob::x-parent-positioning",
+ "ly:grob::y-parent-positioning",
+ "ly:grob?",
+ "ly:gs-cli",
+ "ly:gulp-file",
+ "ly:gulp-file-utf8",
+ "ly:hairpin::broken-bound-padding",
+ "ly:hairpin::print",
+ "ly:hairpin::pure-height",
+ "ly:hara-kiri-group-spanner::calc-skylines",
+ "ly:hara-kiri-group-spanner::force-hara-kiri-callback",
+ "ly:hara-kiri-group-spanner::force-hara-kiri-in-y-parent-callback",
+ "ly:hara-kiri-group-spanner::pure-height",
+ "ly:hara-kiri-group-spanner::y-extent",
+ "ly:has-glyph-names?",
+ "ly:hash-table-keys",
+ "ly:horizontal-bracket-text::calc-direction",
+ "ly:horizontal-bracket-text::print",
+ "ly:horizontal-bracket::print",
+ "ly:horizontal-line-spanner::calc-left-bound-info",
+ "ly:horizontal-line-spanner::calc-left-bound-info-and-text",
+ "ly:horizontal-line-spanner::calc-right-bound-info",
+ "ly:in-event-class?",
+ "ly:inch",
+ "ly:input-both-locations",
+ "ly:input-file-line-char-column",
+ "ly:input-location?",
+ "ly:input-message",
+ "ly:input-warning",
+ "ly:interpret-music-expression",
+ "ly:intlog2",
+ "ly:item-break-dir",
+ "ly:item-get-column",
+ "ly:item?",
+ "ly:iterator?",
+ "ly:key-signature-interface::print",
+ "ly:kievan-ligature::print",
+ "ly:ledger-line-spanner::print",
+ "ly:ledger-line-spanner::set-spacing-rods",
+ "ly:length",
+ "ly:lily-lexer?",
+ "ly:lily-parser?",
+ "ly:line-interface::line",
+ "ly:line-spanner::calc-cross-staff",
+ "ly:line-spanner::calc-left-bound-info",
+ "ly:line-spanner::calc-left-bound-info-and-text",
+ "ly:line-spanner::calc-right-bound-info",
+ "ly:line-spanner::print",
+ "ly:list->offsets",
+ "ly:listened-event-class?",
+ "ly:listened-event-types",
+ "ly:listener?",
+ "ly:load",
+ "ly:lyric-combine-music-iterator::constructor",
+ "ly:lyric-combine-music::length-callback",
+ "ly:lyric-extender::print",
+ "ly:lyric-hyphen::print",
+ "ly:lyric-hyphen::set-spacing-rods",
+ "ly:make-book",
+ "ly:make-book-part",
+ "ly:make-context-mod",
+ "ly:make-dispatcher",
+ "ly:make-duration",
+ "ly:make-event-class",
+ "ly:make-global-context",
+ "ly:make-global-translator",
+ "ly:make-grob-properties",
+ "ly:make-listener",
+ "ly:make-moment",
+ "ly:make-music",
+ "ly:make-music-function",
+ "ly:make-music-relative!",
+ "ly:make-output-def",
+ "ly:make-page-label-marker",
+ "ly:make-page-permission-marker",
+ "ly:make-pango-description-string",
+ "ly:make-paper-outputter",
+ "ly:make-pitch",
+ "ly:make-prob",
+ "ly:make-rotation",
+ "ly:make-scale",
+ "ly:make-scaling",
+ "ly:make-score",
+ "ly:make-spring",
+ "ly:make-stencil",
+ "ly:make-stream-event",
+ "ly:make-transform",
+ "ly:make-translation",
+ "ly:make-unpure-pure-container",
+ "ly:measure-grouping::print",
+ "ly:measure-spanner::calc-connect-to-neighbors",
+ "ly:measure-spanner::print",
+ "ly:melody-spanner::calc-neutral-stem-direction",
+ "ly:mensural-ligature::brew-ligature-primitive",
+ "ly:mensural-ligature::print",
+ "ly:message",
+ "ly:minimal-breaking",
+ "ly:mm",
+ "ly:module->alist",
+ "ly:module-copy",
+ "ly:modules-lookup",
+ "ly:moment-add",
+ "ly:moment-div",
+ "ly:moment-grace",
+ "ly:moment-grace-denominator",
+ "ly:moment-grace-numerator",
+ "ly:moment-main",
+ "ly:moment-main-denominator",
+ "ly:moment-main-numerator",
+ "ly:moment-mod",
+ "ly:moment-mul",
+ "ly:moment-sub",
+ "ly:moment<?",
+ "ly:moment?",
+ "ly:multi-measure-rest::height",
+ "ly:multi-measure-rest::print",
+ "ly:multi-measure-rest::set-spacing-rods",
+ "ly:multi-measure-rest::set-text-rods",
+ "ly:music-compress",
+ "ly:music-deep-copy",
+ "ly:music-duration-compress",
+ "ly:music-duration-length",
+ "ly:music-error",
+ "ly:music-function-extract",
+ "ly:music-function-signature",
+ "ly:music-function?",
+ "ly:music-iterator::constructor",
+ "ly:music-length",
+ "ly:music-list?",
+ "ly:music-message",
+ "ly:music-mutable-properties",
+ "ly:music-output?",
+ "ly:music-property",
+ "ly:music-sequence::cumulative-length-callback",
+ "ly:music-sequence::event-chord-length-callback",
+ "ly:music-sequence::event-chord-relative-callback",
+ "ly:music-sequence::first-start-callback",
+ "ly:music-sequence::maximum-length-callback",
+ "ly:music-sequence::minimum-start-callback",
+ "ly:music-sequence::simultaneous-relative-callback",
+ "ly:music-set-property!",
+ "ly:music-start",
+ "ly:music-transpose",
+ "ly:music-warning",
+ "ly:music-wrapper-iterator::constructor",
+ "ly:music-wrapper::length-callback",
+ "ly:music-wrapper::start-callback",
+ "ly:music::duration-length-callback",
+ "ly:music?",
+ "ly:non-fatal-error",
+ "ly:note-collision-interface::calc-positioning-done",
+ "ly:note-column-accidentals",
+ "ly:note-column-dot-column",
+ "ly:note-column::calc-main-extent",
+ "ly:note-extra-source-file",
+ "ly:note-head::calc-stem-attachment",
+ "ly:note-head::calc-tab-stem-attachment",
+ "ly:note-head::include-ledger-line-height",
+ "ly:note-head::print",
+ "ly:note-head::stem-attachment",
+ "ly:note-head::stem-x-shift",
+ "ly:number->string",
+ "ly:number-pair->string",
+ "ly:one-line-auto-height-breaking",
+ "ly:one-line-breaking",
+ "ly:one-page-breaking",
+ "ly:optimal-breaking",
+ "ly:option-usage",
+ "ly:otf->cff",
+ "ly:otf-font-glyph-info",
+ "ly:otf-font-table-data",
+ "ly:otf-font?",
+ "ly:otf-glyph-count",
+ "ly:otf-glyph-list",
+ "ly:ottava-bracket::print",
+ "ly:output-def-clone",
+ "ly:output-def-lookup",
+ "ly:output-def-parent",
+ "ly:output-def-scope",
+ "ly:output-def-set-variable!",
+ "ly:output-def?",
+ "ly:output-description",
+ "ly:output-find-context-def",
+ "ly:outputter-close",
+ "ly:outputter-dump-stencil",
+ "ly:outputter-dump-string",
+ "ly:outputter-output-scheme",
+ "ly:outputter-port",
+ "ly:page-marker?",
+ "ly:page-turn-breaking",
+ "ly:pango-font-physical-fonts",
+ "ly:pango-font?",
+ "ly:paper-book-header",
+ "ly:paper-book-pages",
+ "ly:paper-book-paper",
+ "ly:paper-book-performances",
+ "ly:paper-book-scopes",
+ "ly:paper-book-systems",
+ "ly:paper-book?",
+ "ly:paper-column::break-align-width",
+ "ly:paper-column::print",
+ "ly:paper-fonts",
+ "ly:paper-get-font",
+ "ly:paper-get-number",
+ "ly:paper-outputscale",
+ "ly:paper-score-paper-systems",
+ "ly:paper-system-minimum-distance",
+ "ly:paper-system?",
+ "ly:parse-file",
+ "ly:parse-init",
+ "ly:parse-string-expression",
+ "ly:parsed-undead-list!",
+ "ly:parser-clear-error",
+ "ly:parser-clone",
+ "ly:parser-define!",
+ "ly:parser-error",
+ "ly:parser-has-error?",
+ "ly:parser-include-string",
+ "ly:parser-lookup",
+ "ly:parser-output-name",
+ "ly:parser-parse-string",
+ "ly:parser-set-note-names",
+ "ly:part-combine-iterator::constructor",
+ "ly:partial-iterator::constructor",
+ "ly:partial-iterator::finalization",
+ "ly:percent-repeat-interface::beat-slash",
+ "ly:percent-repeat-interface::double-percent",
+ "ly:percent-repeat-interface::percent",
+ "ly:percent-repeat-iterator::constructor",
+ "ly:performance-headers",
+ "ly:performance-write",
+ "ly:piano-pedal-bracket::print",
+ "ly:pitch-alteration",
+ "ly:pitch-diff",
+ "ly:pitch-negate",
+ "ly:pitch-notename",
+ "ly:pitch-octave",
+ "ly:pitch-quartertones",
+ "ly:pitch-semitones",
+ "ly:pitch-steps",
+ "ly:pitch-tones",
+ "ly:pitch-transpose",
+ "ly:pitch::less?",
+ "ly:pitch<?",
+ "ly:pitch?",
+ "ly:pointer-group-interface::add-grob",
+ "ly:pop-property-iterator::constructor",
+ "ly:position-on-line?",
+ "ly:prob-immutable-properties",
+ "ly:prob-mutable-properties",
+ "ly:prob-property",
+ "ly:prob-property?",
+ "ly:prob-set-property!",
+ "ly:prob-type?",
+ "ly:prob?",
+ "ly:programming-error",
+ "ly:progress",
+ "ly:property-iterator::constructor",
+ "ly:property-lookup-stats",
+ "ly:property-unset-iterator::constructor",
+ "ly:pt",
+ "ly:pure-call",
+ "ly:pure-from-neighbor-interface::calc-pure-relevant-grobs",
+ "ly:push-property-iterator::constructor",
+ "ly:quote-iterator::constructor",
+ "ly:randomize-rand-seed",
+ "ly:register-stencil-expression",
+ "ly:register-translator",
+ "ly:relative-group-extent",
+ "ly:relative-octave-check::relative-callback",
+ "ly:relative-octave-music::no-relative-callback",
+ "ly:relative-octave-music::relative-callback",
+ "ly:rename-file",
+ "ly:reset-all-fonts",
+ "ly:rest-collision::calc-positioning-done",
+ "ly:rest-collision::force-shift-callback-rest",
+ "ly:rest::calc-cross-staff",
+ "ly:rest::height",
+ "ly:rest::print",
+ "ly:rest::pure-height",
+ "ly:rest::width",
+ "ly:rest::y-offset-callback",
+ "ly:rhythmic-music-iterator::constructor",
+ "ly:round-filled-box",
+ "ly:round-polygon",
+ "ly:run-translator",
+ "ly:score-add-output-def!",
+ "ly:score-embedded-format",
+ "ly:score-error?",
+ "ly:score-header",
+ "ly:score-music",
+ "ly:score-output-defs",
+ "ly:score-set-header!",
+ "ly:score?",
+ "ly:script-column::before-line-breaking",
+ "ly:script-column::row-before-line-breaking",
+ "ly:script-interface::calc-cross-staff",
+ "ly:script-interface::calc-direction",
+ "ly:script-interface::calc-positioning-done",
+ "ly:script-interface::print",
+ "ly:self-alignment-interface::aligned-on-x-parent",
+ "ly:self-alignment-interface::aligned-on-y-parent",
+ "ly:self-alignment-interface::centered-on-x-parent",
+ "ly:self-alignment-interface::centered-on-y-parent",
+ "ly:self-alignment-interface::pure-y-aligned-on-self",
+ "ly:self-alignment-interface::x-aligned-on-self",
+ "ly:self-alignment-interface::y-aligned-on-self",
+ "ly:semi-tie-column::calc-head-direction",
+ "ly:semi-tie-column::calc-positioning-done",
+ "ly:semi-tie::calc-control-points",
+ "ly:separation-item::calc-skylines",
+ "ly:sequential-iterator::constructor",
+ "ly:set-color-names",
+ "ly:set-default-scale",
+ "ly:set-grob-creation-callback",
+ "ly:set-grob-modification-callback",
+ "ly:set-middle-C!",
+ "ly:set-option",
+ "ly:set-origin!",
+ "ly:set-property-cache-callback",
+ "ly:side-position-interface::calc-cross-staff",
+ "ly:side-position-interface::move-to-extremal-staff",
+ "ly:side-position-interface::pure-y-aligned-side",
+ "ly:side-position-interface::x-aligned-side",
+ "ly:side-position-interface::y-aligned-side",
+ "ly:simple-music-iterator::constructor",
+ "ly:simultaneous-music-iterator::constructor",
+ "ly:skyline-distance",
+ "ly:skyline-empty?",
+ "ly:skyline-height",
+ "ly:skyline-max-height",
+ "ly:skyline-max-height-position",
+ "ly:skyline-pad",
+ "ly:skyline-pair?",
+ "ly:skyline-touching-point",
+ "ly:skyline?",
+ "ly:skylines-for-stencil",
+ "ly:slur::calc-control-points",
+ "ly:slur::calc-cross-staff",
+ "ly:slur::calc-direction",
+ "ly:slur::height",
+ "ly:slur::outside-slur-callback",
+ "ly:slur::outside-slur-cross-staff",
+ "ly:slur::print",
+ "ly:slur::pure-height",
+ "ly:slur::pure-outside-slur-callback",
+ "ly:smob-protects",
+ "ly:solve-spring-rod-problem",
+ "ly:source-file?",
+ "ly:source-files",
+ "ly:spacing-spanner::calc-common-shortest-duration",
+ "ly:spacing-spanner::set-springs",
+ "ly:span-bar::before-line-breaking",
+ "ly:span-bar::calc-anchor",
+ "ly:span-bar::calc-glyph-name",
+ "ly:span-bar::choose-model-bar-line",
+ "ly:span-bar::print",
+ "ly:span-bar::width",
+ "ly:spanner-bound",
+ "ly:spanner-broken-into",
+ "ly:spanner-set-bound!",
+ "ly:spanner::bounds-width",
+ "ly:spanner::calc-normalized-endpoints",
+ "ly:spanner::kill-zero-spanned-time",
+ "ly:spanner::set-spacing-rods",
+ "ly:spanner?",
+ "ly:spawn",
+ "ly:spring-set-inverse-compress-strength!",
+ "ly:spring-set-inverse-stretch-strength!",
+ "ly:spring?",
+ "ly:staff-symbol-line-thickness",
+ "ly:staff-symbol-referencer::callback",
+ "ly:staff-symbol-staff-radius",
+ "ly:staff-symbol-staff-space",
+ "ly:staff-symbol::height",
+ "ly:staff-symbol::print",
+ "ly:stderr-redirect",
+ "ly:stem-tremolo::calc-cross-staff",
+ "ly:stem-tremolo::calc-direction",
+ "ly:stem-tremolo::calc-shape",
+ "ly:stem-tremolo::calc-slope",
+ "ly:stem-tremolo::calc-width",
+ "ly:stem-tremolo::calc-y-offset",
+ "ly:stem-tremolo::print",
+ "ly:stem-tremolo::pure-calc-y-offset",
+ "ly:stem-tremolo::pure-height",
+ "ly:stem-tremolo::width",
+ "ly:stem::calc-cross-staff",
+ "ly:stem::calc-default-direction",
+ "ly:stem::calc-direction",
+ "ly:stem::calc-length",
+ "ly:stem::calc-positioning-done",
+ "ly:stem::calc-stem-begin-position",
+ "ly:stem::calc-stem-end-position",
+ "ly:stem::calc-stem-info",
+ "ly:stem::height",
+ "ly:stem::offset-callback",
+ "ly:stem::print",
+ "ly:stem::pure-calc-length",
+ "ly:stem::pure-calc-stem-begin-position",
+ "ly:stem::pure-calc-stem-end-position",
+ "ly:stem::pure-height",
+ "ly:stem::width",
+ "ly:stencil-add",
+ "ly:stencil-aligned-to",
+ "ly:stencil-combine-at-edge",
+ "ly:stencil-empty?",
+ "ly:stencil-expr",
+ "ly:stencil-extent",
+ "ly:stencil-in-color",
+ "ly:stencil-outline",
+ "ly:stencil-rotate",
+ "ly:stencil-rotate-absolute",
+ "ly:stencil-scale",
+ "ly:stencil-stack",
+ "ly:stencil-translate",
+ "ly:stencil-translate-axis",
+ "ly:stencil?",
+ "ly:stream-event::dump",
+ "ly:stream-event::undump",
+ "ly:stream-event?",
+ "ly:string-percent-encode",
+ "ly:string-substitute",
+ "ly:sustain-pedal::print",
+ "ly:system",
+ "ly:system-font-load",
+ "ly:system-start-delimiter::print",
+ "ly:system::calc-pure-height",
+ "ly:system::calc-pure-relevant-grobs",
+ "ly:system::footnotes-after-line-breaking",
+ "ly:system::footnotes-before-line-breaking",
+ "ly:system::get-nonspaceable-staves",
+ "ly:system::get-spaceable-staves",
+ "ly:system::get-staves",
+ "ly:system::get-vertical-alignment",
+ "ly:system::height",
+ "ly:system::vertical-skyline-elements",
+ "ly:text-interface::interpret-markup",
+ "ly:text-interface::interpret-string",
+ "ly:text-interface::print",
+ "ly:tie-column::before-line-breaking",
+ "ly:tie-column::calc-positioning-done",
+ "ly:tie::calc-control-points",
+ "ly:tie::calc-direction",
+ "ly:tie::print",
+ "ly:time-signature::print",
+ "ly:transform->list",
+ "ly:transform?",
+ "ly:translate-cpp-warning-scheme",
+ "ly:translator-context",
+ "ly:translator-description",
+ "ly:translator-group?",
+ "ly:translator-name",
+ "ly:translator?",
+ "ly:transpose-key-alist",
+ "ly:ttf->pfa",
+ "ly:ttf-ps-name",
+ "ly:tuplet-bracket::calc-connect-to-neighbors",
+ "ly:tuplet-bracket::calc-cross-staff",
+ "ly:tuplet-bracket::calc-direction",
+ "ly:tuplet-bracket::calc-positions",
+ "ly:tuplet-bracket::calc-x-positions",
+ "ly:tuplet-bracket::print",
+ "ly:tuplet-iterator::constructor",
+ "ly:tuplet-number::calc-cross-staff",
+ "ly:tuplet-number::calc-x-offset",
+ "ly:tuplet-number::calc-y-offset",
+ "ly:tuplet-number::print",
+ "ly:type1->pfa",
+ "ly:unit",
+ "ly:unpure-call",
+ "ly:unpure-pure-container-pure-part",
+ "ly:unpure-pure-container-unpure-part",
+ "ly:unpure-pure-container?",
+ "ly:usage",
+ "ly:vaticana-ligature::brew-ligature-primitive",
+ "ly:vaticana-ligature::print",
+ "ly:verbose-output?",
+ "ly:version",
+ "ly:version?",
+ "ly:volta-bracket-interface::print",
+ "ly:volta-bracket::calc-shorten-pair",
+ "ly:volta-repeat-iterator::constructor",
+ "ly:volta-specced-music-iterator::constructor",
+ "ly:vowel-transition::set-spacing-rods",
+ "ly:warning",
+ "ly:warning-located",
+ "ly:wide-char->utf-8",
+ "lyric-hyphen::vaticana-style",
+ "lyric-text::print",
+ "magnification->font-size",
+ "magnify-markup",
+ "magnifyStaff-is-set?",
+ "magstep",
+ "maj7-modifier",
+ "make-abs-fontsize-markup",
+ "make-accidental-dodecaphonic-rule",
+ "make-accidental-markup",
+ "make-accidental-rule",
+ "make-align-on-other-markup",
+ "make-apply-context",
+ "make-arrow-head-markup",
+ "make-articulation",
+ "make-auto-footnote-markup",
+ "make-autochange-music",
+ "make-backslashed-digit-markup",
+ "make-beam-markup",
+ "make-bezier-sandwich-stencil",
+ "make-bold-markup",
+ "make-bow-stencil",
+ "make-box-markup",
+ "make-bracket-bar-line",
+ "make-bracket-markup",
+ "make-c-time-signature-markup",
+ "make-caps-markup",
+ "make-center-align-markup",
+ "make-center-column-markup",
+ "make-central-column-hole-addresses",
+ "make-char-markup",
+ "make-chord-elements",
+ "make-circle-markup",
+ "make-circle-stencil",
+ "make-clef-set",
+ "make-coda-markup",
+ "make-colon-bar-line",
+ "make-color-handler",
+ "make-column-lines-markup-list",
+ "make-column-markup",
+ "make-combine-markup",
+ "make-compound-meter-markup",
+ "make-concat-markup",
+ "make-conditional-circle-markup-markup",
+ "make-connected-line",
+ "make-connected-path-stencil",
+ "make-cue-clef-set",
+ "make-cue-clef-unset",
+ "make-customTabClef-markup",
+ "make-dashed-bar-line",
+ "make-default-fonts-tree",
+ "make-dir-column-markup",
+ "make-dotted-bar-line",
+ "make-doubleflat-markup",
+ "make-doublesharp-markup",
+ "make-draw-circle-markup",
+ "make-draw-dashed-line-markup",
+ "make-draw-dotted-line-markup",
+ "make-draw-hline-markup",
+ "make-draw-line-markup",
+ "make-draw-squiggle-line-markup",
+ "make-duration-of-length",
+ "make-dynamic-markup",
+ "make-ellipse-markup",
+ "make-ellipse-stencil",
+ "make-empty-bar-line",
+ "make-engraver",
+ "make-epsfile-markup",
+ "make-event-chord",
+ "make-extended-scale",
+ "make-eyeglasses-markup",
+ "make-fermata-markup",
+ "make-fill-line-markup",
+ "make-fill-with-pattern-markup",
+ "make-filled-box-markup",
+ "make-filled-box-stencil",
+ "make-finger-markup",
+ "make-first-visible-markup",
+ "make-flat-markup",
+ "make-font-tree-leaf",
+ "make-font-tree-node",
+ "make-fontCaps-markup",
+ "make-fontsize-markup",
+ "make-footnote-markup",
+ "make-fraction-markup",
+ "make-fret-diagram",
+ "make-fret-diagram-markup",
+ "make-fret-diagram-terse-markup",
+ "make-fret-diagram-verbose-markup",
+ "make-fromproperty-markup",
+ "make-general-align-markup",
+ "make-glyph-time-signature-markup",
+ "make-grace-music",
+ "make-graceless-rhythmic-location",
+ "make-grob-property-override",
+ "make-grob-property-revert",
+ "make-grob-property-set",
+ "make-halign-markup",
+ "make-harmonic",
+ "make-harp-pedal-markup",
+ "make-hashq-cached-function",
+ "make-hbracket-markup",
+ "make-hcenter-in-markup",
+ "make-hspace-markup",
+ "make-huge-markup",
+ "make-if-markup",
+ "make-italic-markup",
+ "make-justified-lines-markup-list",
+ "make-justify-field-markup",
+ "make-justify-line-markup",
+ "make-justify-markup",
+ "make-justify-string-markup",
+ "make-key-alist",
+ "make-key-symbols",
+ "make-kievan-bar-line",
+ "make-large-markup",
+ "make-larger-markup",
+ "make-left-align-markup",
+ "make-left-brace-markup",
+ "make-left-column-markup",
+ "make-left-hand-key-addresses",
+ "make-line-markup",
+ "make-line-stencil",
+ "make-lookup-markup",
+ "make-lower-markup",
+ "make-lyric-event",
+ "make-lyric-repeat-count-formatter",
+ "make-magnify-markup",
+ "make-map-markup-commands-markup-list",
+ "make-markalphabet-markup",
+ "make-markletter-markup",
+ "make-markup",
+ "make-medium-markup",
+ "make-modal-inverter",
+ "make-modal-transposer",
+ "make-multi-measure-rest",
+ "make-multi-measure-rest-by-number-markup",
+ "make-music",
+ "make-musicglyph-markup",
+ "make-name-keylist",
+ "make-named-spreadsheet",
+ "make-natural-markup",
+ "make-non-relative-music",
+ "make-normal-size-sub-markup",
+ "make-normal-size-super-markup",
+ "make-normal-text-markup",
+ "make-normalsize-markup",
+ "make-note-by-number-markup",
+ "make-note-markup",
+ "make-null-markup",
+ "make-number-keylist",
+ "make-number-markup",
+ "make-on-the-fly-markup",
+ "make-oval-markup",
+ "make-oval-stencil",
+ "make-overlay-markup",
+ "make-override-lines-markup-list",
+ "make-override-markup",
+ "make-overtie-markup",
+ "make-pad-around-markup",
+ "make-pad-markup-markup",
+ "make-pad-to-box-markup",
+ "make-pad-x-markup",
+ "make-page-link-markup",
+ "make-page-ref-markup",
+ "make-pango-font-tree",
+ "make-parenthesis-stencil",
+ "make-parenthesize-markup",
+ "make-part-combine-context-changes",
+ "make-part-combine-marks",
+ "make-partial-ellipse-stencil",
+ "make-path-markup",
+ "make-path-stencil",
+ "make-pattern-markup",
+ "make-percent-set",
+ "make-performer",
+ "make-polygon-markup",
+ "make-postscript-markup",
+ "make-property-recursive-markup",
+ "make-property-set",
+ "make-property-unset",
+ "make-put-adjacent-markup",
+ "make-raise-markup",
+ "make-relative",
+ "make-relative::to-relative-callback",
+ "make-repeat",
+ "make-replace-markup",
+ "make-rest-by-number-markup",
+ "make-rest-markup",
+ "make-rhythm-markup",
+ "make-rhythmic-location",
+ "make-right-align-markup",
+ "make-right-brace-markup",
+ "make-right-column-markup",
+ "make-right-hand-key-addresses",
+ "make-roman-markup",
+ "make-rotate-markup",
+ "make-rounded-box-markup",
+ "make-sans-markup",
+ "make-scale",
+ "make-scale-markup",
+ "make-score-lines-markup-list",
+ "make-score-markup",
+ "make-segno-bar-line",
+ "make-segno-markup",
+ "make-semiflat-markup",
+ "make-semisharp-markup",
+ "make-semitone->pitch",
+ "make-sequential-music",
+ "make-sesquiflat-markup",
+ "make-sesquisharp-markup",
+ "make-session-variable",
+ "make-setting",
+ "make-sharp-markup",
+ "make-short-bar-line",
+ "make-simple-bar-line",
+ "make-simple-markup",
+ "make-simultaneous-music",
+ "make-skip-music",
+ "make-skipped",
+ "make-slashed-digit-markup",
+ "make-small-markup",
+ "make-smallCaps-markup",
+ "make-smaller-markup",
+ "make-spacer-bar-line",
+ "make-span-event",
+ "make-split-state",
+ "make-spreadsheet",
+ "make-stem-span!",
+ "make-stem-spans!",
+ "make-stencil-boxer",
+ "make-stencil-circler",
+ "make-stencil-markup",
+ "make-string-lines-markup-list",
+ "make-strut-markup",
+ "make-sub-markup",
+ "make-super-markup",
+ "make-symbol-alist",
+ "make-tab-heads-transparent",
+ "make-table-markup-list",
+ "make-teeny-markup",
+ "make-text-markup",
+ "make-thick-bar-line",
+ "make-tick-bar-line",
+ "make-tie-markup",
+ "make-tie-stencil",
+ "make-tied-lyric-markup",
+ "make-tilted-portion",
+ "make-time-signature-set",
+ "make-tiny-markup",
+ "make-tmpfile",
+ "make-translate-markup",
+ "make-translate-scaled-markup",
+ "make-translator",
+ "make-translator-component",
+ "make-translator-internal",
+ "make-transparent-box-stencil",
+ "make-transparent-markup",
+ "make-tremolo-set",
+ "make-triangle-markup",
+ "make-type-checker",
+ "make-typewriter-markup",
+ "make-underline-markup",
+ "make-undertie-markup",
+ "make-unfolded-set",
+ "make-unless-markup",
+ "make-upright-markup",
+ "make-varcoda-markup",
+ "make-vcenter-markup",
+ "make-verbatim-file-markup",
+ "make-voice-props-override",
+ "make-voice-props-revert",
+ "make-voice-props-set",
+ "make-voice-states",
+ "make-volta-set",
+ "make-vspace-markup",
+ "make-whiteout-markup",
+ "make-with-color-markup",
+ "make-with-dimension-from-markup",
+ "make-with-dimension-markup",
+ "make-with-dimensions-from-markup",
+ "make-with-dimensions-markup",
+ "make-with-link-markup",
+ "make-with-outline-markup",
+ "make-with-string-transformer-markup",
+ "make-with-true-dimension-markup",
+ "make-with-true-dimensions-markup",
+ "make-with-url-markup",
+ "make-woodwind-diagram-markup",
+ "make-wordwrap-field-markup",
+ "make-wordwrap-internal-markup-list",
+ "make-wordwrap-lines-markup-list",
+ "make-wordwrap-markup",
+ "make-wordwrap-string-internal-markup-list",
+ "make-wordwrap-string-markup",
+ "map-alist-keys",
+ "map-alist-vals",
+ "map-markup-commands-markup-list",
+ "map-selected-alist-keys",
+ "map-some-music",
+ "markalphabet-markup",
+ "marked-up-headfoot",
+ "marked-up-title",
+ "markgeneric-string",
+ "markletter-markup",
+ "markup",
+ "markup->string",
+ "markup-argument-list-error",
+ "markup-argument-list?",
+ "markup-command-list?",
+ "markup-command-signature",
+ "markup-default-to-string-method",
+ "markup-expression->make-markup",
+ "markup-function-as-string-method",
+ "markup-function-category",
+ "markup-function-properties",
+ "markup-function?",
+ "markup-join",
+ "markup-lambda",
+ "markup-lambda-listify",
+ "markup-lambda-worker",
+ "markup-list-function?",
+ "markup-list-lambda",
+ "markup-list?",
+ "markup-thrower-typecheck",
+ "markup-typecheck?",
+ "markup?",
+ "match-predicate",
+ "measure-counter::text",
+ "medium-markup",
+ "mensural-flag",
+ "merge-details",
+ "metronome-markup",
+ "middle-broken-spanner?",
+ "midi-program",
+ "midline-stencil",
+ "minor-modifier",
+ "mkdir-if-not-exist",
+ "mm-rest-child-list",
+ "mmrest-of-length",
+ "modern-straight-flag",
+ "modified-font-metric-font-scaling",
+ "modulo-bar-number-visible",
+ "moment",
+ "moment->fraction",
+ "moment-min",
+ "moment-pair?",
+ "moment<=?",
+ "move-chord-note",
+ "multi-fork",
+ "multi-measure-rest-by-number-markup",
+ "music->make-music",
+ "music-check-error",
+ "music-clone",
+ "music-filter",
+ "music-invert",
+ "music-is-of-type?",
+ "music-map",
+ "music-pitches",
+ "music-property-description",
+ "music-selective-filter",
+ "music-selective-map",
+ "music-separator?",
+ "music-type-predicate",
+ "musicglyph-markup",
+ "n-true-entries",
+ "narrow-glyph?",
+ "natural-chord-alteration",
+ "natural-markup",
+ "negate-extent",
+ "neo-modern-accidental-rule",
+ "no-flag",
+ "normal-flag",
+ "normal-size-sub-markup",
+ "normal-size-super-markup",
+ "normal-text-markup",
+ "normalize-fraction",
+ "normalsize-markup",
+ "not-first-broken-spanner?",
+ "not-last-broken-spanner?",
+ "note-by-number-markup",
+ "note-events",
+ "note-head::brew-ez-stencil",
+ "note-head::calc-duration-log",
+ "note-head::calc-glyph-name",
+ "note-head::calc-kievan-duration-log",
+ "note-markup",
+ "note-name->german-markup",
+ "note-name->markup",
+ "note-name->string",
+ "note-name-markup",
+ "note-names-language",
+ "note-to-cluster",
+ "notes-to-clusters",
+ "null-markup",
+ "number->octal-string",
+ "number-column-stencil",
+ "number-format",
+ "number-list?",
+ "number-markup",
+ "number-or-grob?",
+ "number-or-pair?",
+ "number-or-string?",
+ "number-pair-list?",
+ "number-pair?",
+ "numbered-footnotes",
+ "numerify",
+ "object-type",
+ "object-type-name",
+ "oboe-lh-I-key-stencil",
+ "oboe-lh-II-key-stencil",
+ "oboe-lh-III-key-stencil",
+ "oboe-lh-b-key-stencil",
+ "oboe-lh-bes-key-stencil",
+ "oboe-lh-cis-key-stencil",
+ "oboe-lh-d-key-stencil",
+ "oboe-lh-ees-key-stencil",
+ "oboe-lh-ees-lh-bes-key-stencil",
+ "oboe-lh-f-key-stencil",
+ "oboe-lh-gis-key-stencil",
+ "oboe-lh-gis-lh-low-b-key-stencil",
+ "oboe-lh-low-b-key-stencil",
+ "oboe-lh-octave-key-stencil",
+ "oboe-rh-a-key-stencil",
+ "oboe-rh-banana-key-stencil",
+ "oboe-rh-c-key-stencil",
+ "oboe-rh-c-rh-ees-key-stencil",
+ "oboe-rh-cis-key-stencil",
+ "oboe-rh-d-key-stencil",
+ "oboe-rh-ees-key-stencil",
+ "oboe-rh-f-key-stencil",
+ "oboe-rh-gis-key-stencil",
+ "octave-woodwind-text-stencil",
+ "offset-add",
+ "offset-flip-y",
+ "offset-fret",
+ "offset-multiple-types",
+ "offset-scale",
+ "offsetter",
+ "old-straight-flag",
+ "on-the-fly-markup",
+ "only-if-beamed",
+ "ordered-cons",
+ "other-axis",
+ "output-module?",
+ "output-scopes",
+ "outputproperty-compatibility",
+ "oval-markup",
+ "oval-stencil",
+ "overlay-markup",
+ "override-head-style",
+ "override-lines-markup-list",
+ "override-markup",
+ "override-property-setting",
+ "override-time-signature-setting",
+ "overtie-markup",
+ "pad-around-markup",
+ "pad-markup-markup",
+ "pad-to-box-markup",
+ "pad-x-markup",
+ "page-link-markup",
+ "page-ref-markup",
+ "pair-map",
+ "pango-font-name",
+ "pango-pf-file-name",
+ "pango-pf-font-name",
+ "pango-pf-fontindex",
+ "paper-variable",
+ "parentheses-interface::calc-angled-bracket-stencils",
+ "parentheses-interface::calc-parenthesis-stencils",
+ "parentheses-interface::print",
+ "parentheses-interface::y-extent",
+ "parenthesize-elements",
+ "parenthesize-markup",
+ "parenthesize-stencil",
+ "parse-and-check-version",
+ "parse-lily-version",
+ "parse-terse-string",
+ "path-markup",
+ "pattern-markup",
+ "percussion?",
+ "perform-text-replacements",
+ "performance-name-from-headers",
+ "piccolo-rh-x-key-stencil",
+ "pitch-alteration-semitones",
+ "pitch-invert",
+ "pitch-of-note",
+ "pitch-step",
+ "polar->rectangular",
+ "polygon-markup",
+ "position-true-endpoint",
+ "postprocess-output",
+ "postscript->pdf",
+ "postscript->png",
+ "postscript->ps",
+ "postscript-markup",
+ "precompute-music-length",
+ "prepend-alist-chain",
+ "prepend-props",
+ "pretty-printable?",
+ "previous-span-state",
+ "previous-voice-state",
+ "print-book-with",
+ "print-book-with-defaults",
+ "print-book-with-defaults-as-systems",
+ "print-circled-text-callback",
+ "print-keys",
+ "print-keys-verbose",
+ "process-fill-value",
+ "property-recursive-markup",
+ "pure-chain-offset-callback",
+ "pure-from-neighbor-interface::account-for-span-bar",
+ "pure-from-neighbor-interface::extra-spacing-height",
+ "pure-from-neighbor-interface::extra-spacing-height-at-beginning-of-line",
+ "pure-from-neighbor-interface::extra-spacing-height-including-staff",
+ "pure-from-neighbor-interface::pure-height",
+ "put-adjacent-markup",
+ "quarterdiff->string",
+ "quote-substitute",
+ "raise-markup",
+ "randomize-rand-seed",
+ "ratio->fret",
+ "ratio->pitch",
+ "rational-or-procedure?",
+ "read-lily-expression",
+ "read-lily-expression-internal",
+ "recent-enough?",
+ "recompute-music-length",
+ "recording-group-emulate",
+ "regexp-split",
+ "relevant-book-systems",
+ "relevant-dump-systems",
+ "remove-grace-property",
+ "remove-step",
+ "remove-whitespace",
+ "repeat-tie::handle-tab-note-head",
+ "replace-markup",
+ "replace-step",
+ "replacement-hashtab",
+ "replacement-regexp",
+ "replicate-modify",
+ "reset-stencil-colors",
+ "rest-by-number-markup",
+ "rest-markup",
+ "retrieve-glyph-flag",
+ "retrograde-music",
+ "return-1",
+ "reverse-interval",
+ "revert-fontSize",
+ "revert-head-style",
+ "revert-property-setting",
+ "revert-props",
+ "revert-time-signature-setting",
+ "rgb-color",
+ "rh-woodwind-text-stencil",
+ "rhythm-markup",
+ "rhythmic-location->file-string",
+ "rhythmic-location->string",
+ "rhythmic-location-bar-number",
+ "rhythmic-location-measure-position",
+ "rhythmic-location<=?",
+ "rhythmic-location<?",
+ "rhythmic-location=?",
+ "rhythmic-location>=?",
+ "rhythmic-location>?",
+ "rhythmic-location?",
+ "rich-bassoon-uber-key-stencil",
+ "rich-e-stencil",
+ "rich-group-draw-rule",
+ "rich-group-extra-offset-rule",
+ "rich-path-stencil",
+ "rich-pe-stencil",
+ "right-align-markup",
+ "right-brace-markup",
+ "right-column-markup",
+ "ring-column-circle-stencil",
+ "robust-bar-number-function",
+ "roman-markup",
+ "rotate-markup",
+ "rounded-box-markup",
+ "rounded-box-stencil",
+ "sans-markup",
+ "sans-serif-stencil",
+ "saxophone-lh-T-key-stencil",
+ "saxophone-lh-b-cis-key-stencil",
+ "saxophone-lh-b-key-stencil",
+ "saxophone-lh-bes-key-stencil",
+ "saxophone-lh-cis-key-stencil",
+ "saxophone-lh-d-key-stencil",
+ "saxophone-lh-ees-key-stencil",
+ "saxophone-lh-f-key-stencil",
+ "saxophone-lh-front-f-key-stencil",
+ "saxophone-lh-gis-key-stencil",
+ "saxophone-lh-low-a-key-stencil",
+ "saxophone-lh-low-bes-key-stencil",
+ "saxophone-name-passerelle",
+ "saxophone-rh-bes-key-stencil",
+ "saxophone-rh-c-key-stencil",
+ "saxophone-rh-e-key-stencil",
+ "saxophone-rh-ees-key-stencil",
+ "saxophone-rh-fis-key-stencil",
+ "saxophone-rh-high-fis-key-stencil",
+ "saxophone-rh-low-c-key-stencil",
+ "saxophone-rh-side-key-stencil",
+ "scale->factor",
+ "scale-beam-thickness",
+ "scale-by-font-size",
+ "scale-fontSize",
+ "scale-layout",
+ "scale-markup",
+ "scale-props",
+ "scale?",
+ "scheme?",
+ "scm->string",
+ "score-lines-markup-list",
+ "score-markup",
+ "scorify-music",
+ "script-interface::calc-x-offset",
+ "script-or-side-position-cross-staff",
+ "search-executable",
+ "seconds->moment",
+ "segno-markup",
+ "select-head-glyph",
+ "select-option",
+ "self-alignment-interface::self-aligned-on-breakable",
+ "self-evaluating?",
+ "semi-tie::calc-cross-staff",
+ "semiflat-markup",
+ "semisharp-markup",
+ "sequential-music-to-chord-exceptions",
+ "sesquiflat-markup",
+ "sesquisharp-markup",
+ "session-replay",
+ "session-save",
+ "session-start-record",
+ "session-terminate",
+ "set-accidental-style",
+ "set-bar-number-visibility",
+ "set-counter-text!",
+ "set-default-paper-size",
+ "set-global-fonts",
+ "set-global-staff-size",
+ "set-mus-properties!",
+ "set-output-property",
+ "set-paper-dimension-variables",
+ "set-paper-dimensions",
+ "set-paper-size",
+ "sharp-markup",
+ "shift-duration-log",
+ "shift-octave",
+ "shift-one-duration-log",
+ "shift-right-at-line-begin",
+ "shift-semitone->pitch",
+ "short-glyph?",
+ "sign",
+ "silence-events",
+ "simple-markup",
+ "simple-stencil-alist",
+ "skip->rest",
+ "skip-as-needed",
+ "skip-of-length",
+ "skip-of-moment-span",
+ "skyline-pair-and-non-empty?",
+ "skyline-pair::empty?",
+ "slashed-digit-internal",
+ "slashed-digit-markup",
+ "slashify",
+ "small-markup",
+ "smallCaps-markup",
+ "smaller-markup",
+ "space-lines",
+ "span-bar::compound-bar-line",
+ "span-state",
+ "split-at-predicate",
+ "split-index",
+ "split-list",
+ "split-list-by-separator",
+ "stack-lines",
+ "stack-stencil-line",
+ "stack-stencils",
+ "stack-stencils-padding-list",
+ "stack-thirds",
+ "staff-ellipsis::calc-y-extent",
+ "staff-ellipsis::print",
+ "staff-magnification-is-changing?",
+ "staff-symbol-line-count",
+ "staff-symbol-line-positions",
+ "staff-symbol-line-span",
+ "staff-symbol-y-extent-from-line-positions",
+ "standard-e-stencil",
+ "standard-path-stencil",
+ "stderr",
+ "stem-connectable?",
+ "stem-is-root?",
+ "stem-span-stencil",
+ "stem-stub::do-calculations",
+ "stem-stub::extra-spacing-height",
+ "stem-stub::pure-height",
+ "stem-stub::width",
+ "stem-tremolo::calc-tab-width",
+ "stem::calc-duration-log",
+ "stem::kievan-offset-callback",
+ "stencil-fretboard-extent",
+ "stencil-fretboard-offset",
+ "stencil-markup",
+ "stencil-true-extent",
+ "stencil-whiteout",
+ "stencil-whiteout-box",
+ "stencil-whiteout-outline",
+ "stencil-with-color",
+ "sticky-grob-interface::inherit-property",
+ "straight-flag",
+ "string->string-list",
+ "string-encode-integer",
+ "string-endswith",
+ "string-lines-markup-list",
+ "string-number::calc-text",
+ "string-or-music?",
+ "string-or-pair?",
+ "string-or-symbol?",
+ "string-regexp-substitute",
+ "string-startswith",
+ "string-thickness",
+ "strip-string-annotation",
+ "stroke-finger::calc-text",
+ "strut-markup",
+ "style-note-heads",
+ "sub-markup",
+ "subtract-base-fret",
+ "suggest-convert-ly-message",
+ "super-markup",
+ "sus-modifier",
+ "symbol-concatenate",
+ "symbol-footnotes",
+ "symbol-key-alist?",
+ "symbol-key<?",
+ "symbol-list-or-music?",
+ "symbol-list-or-symbol?",
+ "symbol-list?",
+ "symbol<?",
+ "symlink-if-not-exist",
+ "symlink-or-copy-if-not-exist",
+ "symmetric-interval",
+ "synced?",
+ "system-start-text::calc-x-offset",
+ "system-start-text::calc-y-offset",
+ "system-start-text::print",
+ "tab-note-head::calc-glyph-name",
+ "tab-note-head::print",
+ "tab-note-head::print-custom-fret-label",
+ "tab-note-head::whiteout-if-style-set",
+ "tablature-position-on-lines",
+ "table-markup-list",
+ "tabvoice::draw-double-stem-for-half-notes",
+ "tabvoice::make-double-stem-width-for-half-notes",
+ "tag-group-get",
+ "tags-keep-predicate",
+ "tags-remove-predicate",
+ "teaching-accidental-rule",
+ "teeny-markup",
+ "text-fill-translate",
+ "text-markup",
+ "tie-markup",
+ "tie::handle-tab-note-head",
+ "tied-lyric-markup",
+ "tiny-markup",
+ "translate-draw-instructions",
+ "translate-key-instruction",
+ "translate-markup",
+ "translate-scaled-markup",
+ "translator-property-description",
+ "transparent-markup",
+ "transposer-factory",
+ "triangle-markup",
+ "trill-pitch-group::pure-height",
+ "true-entry?",
+ "tuning",
+ "tuplet-number::append-note-wrapper",
+ "tuplet-number::calc-denominator-text",
+ "tuplet-number::calc-direction",
+ "tuplet-number::calc-fraction-text",
+ "tuplet-number::fraction-with-notes",
+ "tuplet-number::non-default-fraction-with-notes",
+ "tuplet-number::non-default-tuplet-denominator-text",
+ "tuplet-number::non-default-tuplet-fraction-text",
+ "type-name",
+ "typewriter-markup",
+ "unbroken-or-first-broken-spanner?",
+ "unbroken-or-last-broken-spanner?",
+ "unbroken-spanner?",
+ "underline-markup",
+ "undertie-markup",
+ "unfold-repeats",
+ "unfold-repeats-fully",
+ "uniform-draw-instructions",
+ "uniform-extra-offset-rule",
+ "uniq-list",
+ "uniqued-alist",
+ "unity-if-multimeasure",
+ "universal-color",
+ "unless-markup",
+ "update-possb-list",
+ "upper-key-stencil",
+ "upright-markup",
+ "value-for-spanner-piece",
+ "varcoda-markup",
+ "variable-column-circle-stencil",
+ "vcenter-markup",
+ "vector-for-each",
+ "verbatim-file-markup",
+ "version-not-seen-message",
+ "voice-states",
+ "voicify-chord",
+ "voicify-list",
+ "voicify-music",
+ "void?",
+ "volta-bracket-interface::pure-height",
+ "volta-bracket::calc-hook-visibility",
+ "volta-spec-music",
+ "vspace-markup",
+ "whiteout-markup",
+ "with-color-markup",
+ "with-dimension-from-markup",
+ "with-dimension-markup",
+ "with-dimensions-from-markup",
+ "with-dimensions-markup",
+ "with-link-markup",
+ "with-outline-markup",
+ "with-string-transformer-markup",
+ "with-true-dimension-markup",
+ "with-true-dimensions-markup",
+ "with-url-markup",
+ "woodwind-diagram-markup",
+ "wordwrap-field-markup",
+ "wordwrap-internal-markup-list",
+ "wordwrap-lines-markup-list",
+ "wordwrap-markup",
+ "wordwrap-stencils",
+ "wordwrap-string-internal-markup-list",
+ "wordwrap-string-markup",
+ "write-lilypond-book-aux-files",
+ "write-me",
+ "write-performances-midis",
+ "x11-color",
+]
+
+context_properties = [
+ "aDueText",
+ "accidentalGrouping",
+ "additionalBassStrings",
+ "additionalPitchPrefix",
+ "alignAboveContext",
+ "alignBelowContext",
+ "alterationGlyphs",
+ "alternativeNumber",
+ "alternativeNumberingStyle",
+ "alternativeRestores",
+ "associatedVoice",
+ "associatedVoiceContext",
+ "associatedVoiceType",
+ "autoAccidentals",
+ "autoBeamCheck",
+ "autoBeaming",
+ "autoCautionaries",
+ "barAlways",
+ "barCheckLastFail",
+ "barCheckSynchronize",
+ "barExtraVelocity",
+ "barNumberFormatter",
+ "barNumberVisibility",
+ "baseMoment",
+ "beamExceptions",
+ "beamHalfMeasure",
+ "beamMelismaBusy",
+ "beatExtraVelocity",
+ "beatStructure",
+ "breathMarkDefinitions",
+ "breathMarkType",
+ "busyGrobs",
+ "centerBarNumbers",
+ "chordChanges",
+ "chordNameExceptions",
+ "chordNameFunction",
+ "chordNameLowercaseMinor",
+ "chordNameSeparator",
+ "chordNoteNamer",
+ "chordPrefixSpacer",
+ "chordRootNamer",
+ "clefGlyph",
+ "clefPosition",
+ "clefTransposition",
+ "clefTranspositionFormatter",
+ "clefTranspositionStyle",
+ "codaMarkCount",
+ "codaMarkFormatter",
+ "completionBusy",
+ "completionFactor",
+ "completionUnit",
+ "connectArpeggios",
+ "countPercentRepeats",
+ "createKeyOnClefChange",
+ "createSpacing",
+ "crescendoSpanner",
+ "crescendoText",
+ "cueClefGlyph",
+ "cueClefPosition",
+ "cueClefTransposition",
+ "cueClefTranspositionFormatter",
+ "cueClefTranspositionStyle",
+ "currentBarLine",
+ "currentBarNumber",
+ "currentChordCause",
+ "currentChordText",
+ "currentCommandColumn",
+ "currentMarkEvent",
+ "currentMusicalColumn",
+ "dalSegnoTextFormatter",
+ "decrescendoSpanner",
+ "decrescendoText",
+ "defaultStrings",
+ "doubleRepeatBarType",
+ "doubleRepeatSegnoBarType",
+ "doubleSlurs",
+ "drumPitchTable",
+ "drumStyleTable",
+ "dynamicAbsoluteVolumeFunction",
+ "endAtSkip",
+ "endRepeatBarType",
+ "endRepeatSegnoBarType",
+ "explicitClefVisibility",
+ "explicitCueClefVisibility",
+ "explicitKeySignatureVisibility",
+ "extendersOverRests",
+ "extraNatural",
+ "figuredBassAlterationDirection",
+ "figuredBassCenterContinuations",
+ "figuredBassFormatter",
+ "figuredBassLargeNumberAlignment",
+ "figuredBassPlusDirection",
+ "figuredBassPlusStrokedAlist",
+ "finalFineTextVisibility",
+ "finalizations",
+ "fineBarType",
+ "fineSegnoBarType",
+ "fineStartRepeatSegnoBarType",
+ "fineText",
+ "fingeringOrientations",
+ "firstClef",
+ "followVoice",
+ "fontSize",
+ "forbidBreak",
+ "forbidBreakBetweenBarLines",
+ "forceBreak",
+ "forceClef",
+ "fretLabels",
+ "glissandoMap",
+ "graceSettings",
+ "gridInterval",
+ "handleNegativeFrets",
+ "harmonicAccidentals",
+ "harmonicDots",
+ "hasAxisGroup",
+ "hasStaffSpacing",
+ "highStringOne",
+ "ignoreBarChecks",
+ "ignoreBarNumberChecks",
+ "ignoreFiguredBassRest",
+ "ignoreMelismata",
+ "implicitBassFigures",
+ "includeGraceNotes",
+ "initialTimeSignatureVisibility",
+ "instrumentCueName",
+ "instrumentEqualizer",
+ "instrumentName",
+ "instrumentTransposition",
+ "internalBarNumber",
+ "keepAliveInterfaces",
+ "keyAlterationOrder",
+ "keyAlterations",
+ "lastChord",
+ "lastKeyAlterations",
+ "localAlterations",
+ "lyricMelismaAlignment",
+ "lyricRepeatCountFormatter",
+ "magnifyStaffValue",
+ "majorSevenSymbol",
+ "maximumFretStretch",
+ "measureBarType",
+ "measureLength",
+ "measurePosition",
+ "measureStartNow",
+ "melismaBusy",
+ "melismaBusyProperties",
+ "metronomeMarkFormatter",
+ "middleCClefPosition",
+ "middleCCuePosition",
+ "middleCOffset",
+ "middleCPosition",
+ "midiBalance",
+ "midiChannelMapping",
+ "midiChorusLevel",
+ "midiExpression",
+ "midiInstrument",
+ "midiMaximumVolume",
+ "midiMergeUnisons",
+ "midiMinimumVolume",
+ "midiPanPosition",
+ "midiReverbLevel",
+ "midiSkipOffset",
+ "minimumFret",
+ "minimumPageTurnLength",
+ "minimumRepeatLengthForPageTurn",
+ "minorChordModifier",
+ "noChordSymbol",
+ "noteNameFunction",
+ "noteNameSeparator",
+ "noteToFretFunction",
+ "nullAccidentals",
+ "ottavaStartNow",
+ "ottavation",
+ "ottavationMarkups",
+ "output",
+ "partCombineForced",
+ "partCombineTextsOnNote",
+ "partialBusy",
+ "pedalSostenutoStrings",
+ "pedalSostenutoStyle",
+ "pedalSustainStrings",
+ "pedalSustainStyle",
+ "pedalUnaCordaStrings",
+ "pedalUnaCordaStyle",
+ "predefinedDiagramTable",
+ "printAccidentalNames",
+ "printKeyCancellation",
+ "printNotesLanguage",
+ "printOctaveNames",
+ "printPartCombineTexts",
+ "proportionalNotationDuration",
+ "quotedCueEventTypes",
+ "quotedEventTypes",
+ "rehearsalMark",
+ "rehearsalMarkFormatter",
+ "repeatCommands",
+ "repeatCountVisibility",
+ "restCompletionBusy",
+ "restNumberThreshold",
+ "restrainOpenStrings",
+ "rootSystem",
+ "scriptDefinitions",
+ "searchForVoice",
+ "sectionBarType",
+ "segnoBarType",
+ "segnoMarkCount",
+ "segnoMarkFormatter",
+ "segnoStyle",
+ "shapeNoteStyles",
+ "shortInstrumentName",
+ "shortVocalName",
+ "skipBars",
+ "skipTypesetting",
+ "slashChordSeparator",
+ "slurMelismaBusy",
+ "soloIIText",
+ "soloText",
+ "squashedPosition",
+ "staffLineLayoutFunction",
+ "stanza",
+ "startAtNoteColumn",
+ "startAtSkip",
+ "startRepeatBarType",
+ "startRepeatSegnoBarType",
+ "stavesFound",
+ "stemLeftBeamCount",
+ "stemRightBeamCount",
+ "strictBeatBeaming",
+ "stringFretFingerList",
+ "stringNumberOrientations",
+ "stringOneTopmost",
+ "stringTunings",
+ "strokeFingerOrientations",
+ "subdivideBeams",
+ "suggestAccidentals",
+ "supportNonIntegerFret",
+ "suspendMelodyDecisions",
+ "suspendRestMerging",
+ "systemStartDelimiter",
+ "systemStartDelimiterHierarchy",
+ "tabStaffLineLayoutFunction",
+ "tablatureFormat",
+ "tempoHideNote",
+ "tempoWholesPerMinute",
+ "tieMelismaBusy",
+ "tieWaitForNote",
+ "timeSignatureFraction",
+ "timeSignatureSettings",
+ "timing",
+ "tonic",
+ "topLevelAlignment",
+ "tupletFullLength",
+ "tupletFullLengthNote",
+ "tupletSpannerDuration",
+ "underlyingRepeatBarType",
+ "useBassFigureExtenders",
+ "vocalName",
+ "voltaSpannerDuration",
+ "whichBar",
+]
+
+grob_properties = [
+ "X-align-on-main-noteheads",
+ "X-attachment",
+ "X-common",
+ "X-extent",
+ "X-offset",
+ "X-positions",
+ "Y-attachment",
+ "Y-common",
+ "Y-extent",
+ "Y-offset",
+ "accidental-grob",
+ "accidental-grobs",
+ "add-cauda",
+ "add-join",
+ "add-stem",
+ "add-stem-support",
+ "adjacent-pure-heights",
+ "adjacent-spanners",
+ "after-line-breaking",
+ "align-dir",
+ "all-elements",
+ "allow-loose-spacing",
+ "allow-span-bar",
+ "alteration",
+ "alteration-alist",
+ "alteration-glyph-name-alist",
+ "annotation",
+ "annotation-balloon",
+ "annotation-line",
+ "arpeggio-direction",
+ "arrow-length",
+ "arrow-width",
+ "ascendens",
+ "auctum",
+ "auto-knee-gap",
+ "automatically-numbered",
+ "average-spacing-wishes",
+ "avoid-note-head",
+ "avoid-scripts",
+ "avoid-slur",
+ "axes",
+ "axis-group-parent-X",
+ "axis-group-parent-Y",
+ "bar-extent",
+ "bars",
+ "base-shortest-duration",
+ "baseline-skip",
+ "beam",
+ "beam-segments",
+ "beam-thickness",
+ "beam-width",
+ "beamed-stem-shorten",
+ "beaming",
+ "beamlet-default-length",
+ "beamlet-max-length-proportion",
+ "before-line-breaking",
+ "begin-of-line-visible",
+ "bend-me",
+ "between-cols",
+ "bezier",
+ "bound-alignment-interfaces",
+ "bound-details",
+ "bound-padding",
+ "bounded-by-me",
+ "bracket",
+ "bracket-flare",
+ "bracket-text",
+ "bracket-visibility",
+ "break-align-anchor",
+ "break-align-anchor-alignment",
+ "break-align-orders",
+ "break-align-symbol",
+ "break-align-symbols",
+ "break-alignment",
+ "break-overshoot",
+ "break-visibility",
+ "breakable",
+ "broken-bound-padding",
+ "c0-position",
+ "cause",
+ "cavum",
+ "chord-dots-limit",
+ "chord-names",
+ "circled-tip",
+ "clef-alignments",
+ "clip-edges",
+ "collapse-height",
+ "collision-interfaces",
+ "collision-voice-only",
+ "color",
+ "columns",
+ "common-shortest-duration",
+ "concaveness",
+ "concurrent-hairpins",
+ "conditional-elements",
+ "connect-to-neighbor",
+ "context-info",
+ "control-points",
+ "count-from",
+ "covered-grobs",
+ "cross-staff",
+ "damping",
+ "dash-definition",
+ "dash-fraction",
+ "dash-period",
+ "dashed-edge",
+ "default-direction",
+ "default-staff-staff-spacing",
+ "delta-position",
+ "deminutum",
+ "descendens",
+ "details",
+ "digit-names",
+ "direction",
+ "direction-source",
+ "display-cautionary",
+ "dot",
+ "dot-count",
+ "dot-negative-kern",
+ "dot-placement-list",
+ "dots",
+ "double-stem-separation",
+ "duration-log",
+ "eccentricity",
+ "edge-height",
+ "edge-text",
+ "elements",
+ "encompass-objects",
+ "endpoint-alignments",
+ "expand-limit",
+ "extra-dy",
+ "extra-offset",
+ "extra-spacing-height",
+ "extra-spacing-width",
+ "extroversion",
+ "figures",
+ "filled",
+ "flag",
+ "flag-count",
+ "flag-style",
+ "flat-positions",
+ "flexa-height",
+ "flexa-interval",
+ "flexa-width",
+ "font",
+ "font-encoding",
+ "font-family",
+ "font-features",
+ "font-name",
+ "font-series",
+ "font-shape",
+ "font-size",
+ "footnote",
+ "footnote-music",
+ "footnote-stencil",
+ "footnote-text",
+ "footnotes-after-line-breaking",
+ "footnotes-before-line-breaking",
+ "force-hshift",
+ "forced",
+ "forced-spacing",
+ "fraction",
+ "french-beaming",
+ "french-beaming-stem-adjustment",
+ "fret-diagram-details",
+ "full-length-padding",
+ "full-length-to-extent",
+ "full-measure-extra-space",
+ "full-size-change",
+ "gap",
+ "gap-count",
+ "glissando-index",
+ "glissando-skip",
+ "glyph",
+ "glyph-left",
+ "glyph-name",
+ "glyph-right",
+ "grace-spacing",
+ "graphical",
+ "grow-direction",
+ "hair-thickness",
+ "harp-pedal-details",
+ "has-span-bar",
+ "head-direction",
+ "head-width",
+ "heads",
+ "height",
+ "height-limit",
+ "hide-tied-accidental-after-break",
+ "horizon-padding",
+ "horizontal-shift",
+ "horizontal-skylines",
+ "id",
+ "ideal-distances",
+ "ignore-ambitus",
+ "ignore-collision",
+ "implicit",
+ "important-column-ranks",
+ "in-note-direction",
+ "in-note-padding",
+ "in-note-stencil",
+ "inclinatum",
+ "index",
+ "inspect-quants",
+ "interfaces",
+ "items-worth-living",
+ "keep-alive-with",
+ "keep-inside-line",
+ "kern",
+ "knee",
+ "knee-spacing-correction",
+ "knee-to-beam",
+ "labels",
+ "layer",
+ "least-squares-dy",
+ "ledger-extra",
+ "ledger-line-thickness",
+ "ledger-positions",
+ "ledger-positions-function",
+ "left-bound-info",
+ "left-items",
+ "left-neighbor",
+ "left-number-text",
+ "left-padding",
+ "length",
+ "length-fraction",
+ "ligature-flexa",
+ "line-break-penalty",
+ "line-break-permission",
+ "line-break-system-details",
+ "line-count",
+ "line-positions",
+ "line-thickness",
+ "linea",
+ "long-text",
+ "main-extent",
+ "make-dead-when",
+ "max-beam-connect",
+ "max-symbol-separation",
+ "maximum-gap",
+ "maybe-loose",
+ "measure-count",
+ "measure-division",
+ "measure-division-chord-placement-alist",
+ "measure-division-lines-alist",
+ "measure-length",
+ "melody-spanner",
+ "merge-differently-dotted",
+ "merge-differently-headed",
+ "meta",
+ "minimum-X-extent",
+ "minimum-Y-extent",
+ "minimum-distance",
+ "minimum-distances",
+ "minimum-length",
+ "minimum-length-after-break",
+ "minimum-length-fraction",
+ "minimum-space",
+ "minimum-translations-alist",
+ "neighbors",
+ "neutral-direction",
+ "neutral-position",
+ "next",
+ "no-ledgers",
+ "no-stem-extend",
+ "non-break-align-symbols",
+ "non-default",
+ "non-musical",
+ "nonstaff-nonstaff-spacing",
+ "nonstaff-relatedstaff-spacing",
+ "nonstaff-unrelatedstaff-spacing",
+ "normal-stems",
+ "normalized-endpoints",
+ "note-collision",
+ "note-collision-threshold",
+ "note-columns",
+ "note-head",
+ "note-heads",
+ "note-names",
+ "number-range-separator",
+ "number-type",
+ "numbering-assertion-function",
+ "oriscus",
+ "output-attributes",
+ "outside-staff-horizontal-padding",
+ "outside-staff-padding",
+ "outside-staff-placement-directive",
+ "outside-staff-priority",
+ "packed-spacing",
+ "padding",
+ "padding-pairs",
+ "page-break-penalty",
+ "page-break-permission",
+ "page-number",
+ "page-turn-penalty",
+ "page-turn-permission",
+ "parent-alignment-X",
+ "parent-alignment-Y",
+ "parenthesis-friends",
+ "parenthesis-id",
+ "parenthesized",
+ "pedal-text",
+ "pes-or-flexa",
+ "positioning-done",
+ "positions",
+ "prefer-dotted-right",
+ "prefix-set",
+ "primitive",
+ "protrusion",
+ "pure-Y-common",
+ "pure-Y-extent",
+ "pure-Y-offset-in-progress",
+ "pure-relevant-grobs",
+ "pure-relevant-items",
+ "pure-relevant-spanners",
+ "quantize-position",
+ "quantized-positions",
+ "quilisma",
+ "rank-on-page",
+ "ratio",
+ "remove-empty",
+ "remove-first",
+ "remove-layer",
+ "replacement-alist",
+ "rest",
+ "rest-collision",
+ "restore-first",
+ "rests",
+ "rhythmic-location",
+ "right-bound-info",
+ "right-items",
+ "right-neighbor",
+ "right-number-text",
+ "right-padding",
+ "rotation",
+ "round-up-exceptions",
+ "round-up-to-longer-rest",
+ "rounded",
+ "same-direction-correction",
+ "script-column",
+ "script-priority",
+ "script-stencil",
+ "scripts",
+ "segno-kern",
+ "self-alignment-X",
+ "self-alignment-Y",
+ "shape",
+ "sharp-positions",
+ "shorten",
+ "shorten-pair",
+ "shortest-duration-space",
+ "shortest-playing-duration",
+ "shortest-starter-duration",
+ "show-control-points",
+ "show-horizontal-skylines",
+ "show-vertical-skylines",
+ "side-axis",
+ "side-relative-direction",
+ "side-support-elements",
+ "size",
+ "skip-quanting",
+ "skyline-horizontal-padding",
+ "skyline-vertical-padding",
+ "slash-negative-kern",
+ "slope",
+ "slur",
+ "slur-padding",
+ "snap-radius",
+ "space-alist",
+ "space-increment",
+ "space-to-barline",
+ "spacing",
+ "spacing-increment",
+ "spacing-pair",
+ "spacing-wishes",
+ "span-start",
+ "spanner-broken",
+ "spanner-id",
+ "spanner-placement",
+ "springs-and-rods",
+ "stacking-dir",
+ "staff-affinity",
+ "staff-grouper",
+ "staff-padding",
+ "staff-position",
+ "staff-space",
+ "staff-staff-spacing",
+ "staff-symbol",
+ "staffgroup-staff-spacing",
+ "stem",
+ "stem-attachment",
+ "stem-begin-position",
+ "stem-info",
+ "stem-spacing-correction",
+ "stemlet-length",
+ "stems",
+ "stencil",
+ "stencils",
+ "sticky-host",
+ "strict-grace-spacing",
+ "strict-note-spacing",
+ "stroke-style",
+ "stropha",
+ "style",
+ "system-Y-offset",
+ "text",
+ "text-alignment-X",
+ "text-alignment-Y",
+ "text-direction",
+ "thick-thickness",
+ "thickness",
+ "tie",
+ "tie-configuration",
+ "ties",
+ "to-barline",
+ "toward-stem-shift",
+ "toward-stem-shift-in-column",
+ "transparent",
+ "tremolo-flag",
+ "tuplet-number",
+ "tuplet-slur",
+ "tuplet-start",
+ "tuplets",
+ "uniform-stretching",
+ "usable-duration-logs",
+ "use-skylines",
+ "used",
+ "vertical-alignment",
+ "vertical-skyline-elements",
+ "vertical-skylines",
+ "virga",
+ "voiced-position",
+ "when",
+ "whiteout",
+ "whiteout-style",
+ "width",
+ "word-space",
+ "x-offset",
+ "zigzag-length",
+ "zigzag-width",
+]
+
+paper_variables = [
+ "auto-first-page-number",
+ "basic-distance",
+ "binding-offset",
+ "blank-last-page-penalty",
+ "blank-page-penalty",
+ "bookTitleMarkup",
+ "bottom-margin",
+ "check-consistency",
+ "evenFooterMarkup",
+ "evenHeaderMarkup",
+ "first-page-number",
+ "footnote-separator-markup",
+ "horizontal-shift",
+ "indent",
+ "inner-margin",
+ "last-bottom-spacing",
+ "left-margin",
+ "line-width",
+ "markup-markup-spacing",
+ "markup-system-spacing",
+ "max-systems-per-page",
+ "min-systems-per-page",
+ "minimum-distance",
+ "oddFooterMarkup",
+ "oddHeaderMarkup",
+ "outer-margin",
+ "padding",
+ "page-breaking",
+ "page-breaking-system-system-spacing",
+ "page-count",
+ "page-number-type",
+ "page-spacing-weight",
+ "paper-height",
+ "paper-width",
+ "print-all-headers",
+ "print-first-page-number",
+ "ragged-bottom",
+ "ragged-last",
+ "ragged-last-bottom",
+ "ragged-right",
+ "right-margin",
+ "score-markup-spacing",
+ "score-system-spacing",
+ "scoreTitleMarkup",
+ "short-indent",
+ "stretchability",
+ "system-count",
+ "system-separator-markup",
+ "system-system-spacing",
+ "systems-per-page",
+ "top-margin",
+ "top-markup-spacing",
+ "top-system-spacing",
+ "two-sided",
+]
+
+header_variables = [
+ "arranger",
+ "composer",
+ "copyright",
+ "dedication",
+ "doctitle",
+ "instrument",
+ "lsrtags",
+ "meter",
+ "opus",
+ "piece",
+ "poet",
+ "subsubtitle",
+ "subtitle",
+ "tagline",
+ "texidoc",
+ "title",
+]
+
diff --git a/pygments/lexers/_lua_builtins.py b/pygments/lexers/_lua_builtins.py
new file mode 100644
index 0000000..a31f6b3
--- /dev/null
+++ b/pygments/lexers/_lua_builtins.py
@@ -0,0 +1,285 @@
+"""
+ pygments.lexers._lua_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This file contains the names and modules of Lua functions.
+ It is able to re-generate itself, but for adding new functions you
+ probably have to add some callbacks (see function module_callbacks).
+
+ Do not edit the MODULES dict by hand.
+
+ Run with `python -I` to regenerate.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+MODULES = {'basic': ('_G',
+ '_VERSION',
+ 'assert',
+ 'collectgarbage',
+ 'dofile',
+ 'error',
+ 'getmetatable',
+ 'ipairs',
+ 'load',
+ 'loadfile',
+ 'next',
+ 'pairs',
+ 'pcall',
+ 'print',
+ 'rawequal',
+ 'rawget',
+ 'rawlen',
+ 'rawset',
+ 'select',
+ 'setmetatable',
+ 'tonumber',
+ 'tostring',
+ 'type',
+ 'warn',
+ 'xpcall'),
+ 'bit32': ('bit32.arshift',
+ 'bit32.band',
+ 'bit32.bnot',
+ 'bit32.bor',
+ 'bit32.btest',
+ 'bit32.bxor',
+ 'bit32.extract',
+ 'bit32.lrotate',
+ 'bit32.lshift',
+ 'bit32.replace',
+ 'bit32.rrotate',
+ 'bit32.rshift'),
+ 'coroutine': ('coroutine.close',
+ 'coroutine.create',
+ 'coroutine.isyieldable',
+ 'coroutine.resume',
+ 'coroutine.running',
+ 'coroutine.status',
+ 'coroutine.wrap',
+ 'coroutine.yield'),
+ 'debug': ('debug.debug',
+ 'debug.gethook',
+ 'debug.getinfo',
+ 'debug.getlocal',
+ 'debug.getmetatable',
+ 'debug.getregistry',
+ 'debug.getupvalue',
+ 'debug.getuservalue',
+ 'debug.sethook',
+ 'debug.setlocal',
+ 'debug.setmetatable',
+ 'debug.setupvalue',
+ 'debug.setuservalue',
+ 'debug.traceback',
+ 'debug.upvalueid',
+ 'debug.upvaluejoin'),
+ 'io': ('io.close',
+ 'io.flush',
+ 'io.input',
+ 'io.lines',
+ 'io.open',
+ 'io.output',
+ 'io.popen',
+ 'io.read',
+ 'io.stderr',
+ 'io.stdin',
+ 'io.stdout',
+ 'io.tmpfile',
+ 'io.type',
+ 'io.write'),
+ 'math': ('math.abs',
+ 'math.acos',
+ 'math.asin',
+ 'math.atan',
+ 'math.atan2',
+ 'math.ceil',
+ 'math.cos',
+ 'math.cosh',
+ 'math.deg',
+ 'math.exp',
+ 'math.floor',
+ 'math.fmod',
+ 'math.frexp',
+ 'math.huge',
+ 'math.ldexp',
+ 'math.log',
+ 'math.max',
+ 'math.maxinteger',
+ 'math.min',
+ 'math.mininteger',
+ 'math.modf',
+ 'math.pi',
+ 'math.pow',
+ 'math.rad',
+ 'math.random',
+ 'math.randomseed',
+ 'math.sin',
+ 'math.sinh',
+ 'math.sqrt',
+ 'math.tan',
+ 'math.tanh',
+ 'math.tointeger',
+ 'math.type',
+ 'math.ult'),
+ 'modules': ('package.config',
+ 'package.cpath',
+ 'package.loaded',
+ 'package.loadlib',
+ 'package.path',
+ 'package.preload',
+ 'package.searchers',
+ 'package.searchpath',
+ 'require'),
+ 'os': ('os.clock',
+ 'os.date',
+ 'os.difftime',
+ 'os.execute',
+ 'os.exit',
+ 'os.getenv',
+ 'os.remove',
+ 'os.rename',
+ 'os.setlocale',
+ 'os.time',
+ 'os.tmpname'),
+ 'string': ('string.byte',
+ 'string.char',
+ 'string.dump',
+ 'string.find',
+ 'string.format',
+ 'string.gmatch',
+ 'string.gsub',
+ 'string.len',
+ 'string.lower',
+ 'string.match',
+ 'string.pack',
+ 'string.packsize',
+ 'string.rep',
+ 'string.reverse',
+ 'string.sub',
+ 'string.unpack',
+ 'string.upper'),
+ 'table': ('table.concat',
+ 'table.insert',
+ 'table.move',
+ 'table.pack',
+ 'table.remove',
+ 'table.sort',
+ 'table.unpack'),
+ 'utf8': ('utf8.char',
+ 'utf8.charpattern',
+ 'utf8.codepoint',
+ 'utf8.codes',
+ 'utf8.len',
+ 'utf8.offset')}
+
+if __name__ == '__main__': # pragma: no cover
+ import re
+ from urllib.request import urlopen
+ import pprint
+
+    # You can't generally find out what module a function belongs to if you
+    # have only its name. Because of this, here are some callback functions
+    # that recognize if a given function belongs to a specific module.
+ def module_callbacks():
+ def is_in_coroutine_module(name):
+ return name.startswith('coroutine.')
+
+ def is_in_modules_module(name):
+ if name in ['require', 'module'] or name.startswith('package'):
+ return True
+ else:
+ return False
+
+ def is_in_string_module(name):
+ return name.startswith('string.')
+
+ def is_in_table_module(name):
+ return name.startswith('table.')
+
+ def is_in_math_module(name):
+ return name.startswith('math')
+
+ def is_in_io_module(name):
+ return name.startswith('io.')
+
+ def is_in_os_module(name):
+ return name.startswith('os.')
+
+ def is_in_debug_module(name):
+ return name.startswith('debug.')
+
+ return {'coroutine': is_in_coroutine_module,
+ 'modules': is_in_modules_module,
+ 'string': is_in_string_module,
+ 'table': is_in_table_module,
+ 'math': is_in_math_module,
+ 'io': is_in_io_module,
+ 'os': is_in_os_module,
+ 'debug': is_in_debug_module}
+
+
+
+ def get_newest_version():
+ f = urlopen('http://www.lua.org/manual/')
+ r = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>')
+ for line in f:
+ m = r.match(line.decode('iso-8859-1'))
+ if m is not None:
+ return m.groups()[0]
+
+ def get_lua_functions(version):
+ f = urlopen('http://www.lua.org/manual/%s/' % version)
+ r = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>')
+ functions = []
+ for line in f:
+ m = r.match(line.decode('iso-8859-1'))
+ if m is not None:
+ functions.append(m.groups()[0])
+ return functions
+
+ def get_function_module(name):
+ for mod, cb in module_callbacks().items():
+ if cb(name):
+ return mod
+ if '.' in name:
+ return name.split('.')[0]
+ else:
+ return 'basic'
+
+ def regenerate(filename, modules):
+ with open(filename) as fp:
+ content = fp.read()
+
+ header = content[:content.find('MODULES = {')]
+ footer = content[content.find("if __name__ == '__main__':"):]
+
+
+ with open(filename, 'w') as fp:
+ fp.write(header)
+ fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
+ fp.write(footer)
+
+ def run():
+ version = get_newest_version()
+ functions = set()
+ for v in ('5.2', version):
+ print('> Downloading function index for Lua %s' % v)
+ f = get_lua_functions(v)
+ print('> %d functions found, %d new:' %
+ (len(f), len(set(f) - functions)))
+ functions |= set(f)
+
+ functions = sorted(functions)
+
+ modules = {}
+ for full_function_name in functions:
+ print('>> %s' % full_function_name)
+ m = get_function_module(full_function_name)
+ modules.setdefault(m, []).append(full_function_name)
+ modules = {k: tuple(v) for k, v in modules.items()}
+
+ regenerate(__file__, modules)
+
+ run()
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
new file mode 100644
index 0000000..d2d7880
--- /dev/null
+++ b/pygments/lexers/_mapping.py
@@ -0,0 +1,553 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+
+LEXERS = {
+ 'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
+ 'AMDGPULexer': ('pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
+ 'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
+ 'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
+ 'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
+ 'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
+ 'AdaLexer': ('pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+ 'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
+ 'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
+ 'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
+ 'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
+ 'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
+ 'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
+ 'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
+ 'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
+ 'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
+ 'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
+ 'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
+ 'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
+ 'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
+ 'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
+ 'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
+ 'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
+ 'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
+ 'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
+ 'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
+ 'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
+ 'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
+ 'ArturoLexer': ('pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
+ 'AscLexer': ('pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')),
+ 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
+ 'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
+ 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
+ 'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
+ 'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
+ 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
+ 'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
+ 'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
+ 'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
+ 'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
+ 'BareLexer': ('pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
+ 'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
+ 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
+ 'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
+ 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
+ 'BddLexer': ('pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
+ 'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
+ 'BerryLexer': ('pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
+ 'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
+ 'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
+ 'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
+ 'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
+ 'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
+ 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
+ 'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
+ 'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
+ 'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
+ 'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
+ 'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
+ 'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
+ 'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
+ 'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
+ 'CSSUL4Lexer': ('pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
+ 'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
+ 'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
+ 'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
+ 'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
+ 'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
+ 'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
+ 'CddlLexer': ('pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
+ 'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
+ 'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
+ 'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
+ 'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
+ 'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
+ 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
+ 'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
+ 'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
+ 'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
+ 'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
+ 'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
+ 'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
+ 'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
+ 'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
+ 'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
+ 'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
+ 'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
+ 'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
+ 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
+ 'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
+ 'Comal80Lexer': ('pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
+ 'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
+ 'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
+ 'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
+ 'CplintLexer': ('pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
+ 'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
+ 'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
+ 'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
+ 'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
+ 'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
+ 'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
+ 'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
+ 'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
+ 'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
+ 'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
+ 'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
+ 'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
+ 'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
+ 'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
+ 'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
+ 'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
+ 'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
+ 'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
+ 'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
+ 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
+ 'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
+ 'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
+ 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
+ 'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
+ 'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
+ 'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
+ 'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
+ 'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
+ 'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
+ 'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
+ 'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
+ 'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
+ 'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
+ 'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
+ 'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
+ 'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
+ 'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
+ 'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
+ 'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
+ 'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
+ 'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
+ 'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
+ 'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
+ 'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
+ 'ElpiLexer': ('pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
+ 'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
+ 'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
+ 'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
+ 'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
+ 'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
+ 'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
+ 'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
+ 'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
+ 'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
+ 'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
+ 'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
+ 'FStarLexer': ('pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
+ 'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
+ 'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
+ 'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
+ 'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
+ 'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
+ 'FiftLexer': ('pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
+ 'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
+ 'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
+ 'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
+ 'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
+ 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
+ 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
+ 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
+ 'FuncLexer': ('pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
+ 'FutharkLexer': ('pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
+ 'GAPConsoleLexer': ('pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
+ 'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
+ 'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
+ 'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
+ 'GSQLLexer': ('pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
+ 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
+ 'GcodeLexer': ('pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
+ 'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
+ 'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
+ 'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
+ 'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
+ 'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
+ 'GoLexer': ('pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
+ 'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
+ 'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
+ 'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
+ 'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
+ 'GraphvizLexer': ('pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
+ 'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
+ 'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
+ 'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
+ 'HTMLUL4Lexer': ('pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
+ 'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
+ 'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
+ 'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
+ 'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
+ 'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
+ 'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
+ 'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
+ 'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
+ 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
+ 'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
+ 'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
+ 'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
+ 'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
+ 'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
+ 'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
+ 'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
+ 'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
+ 'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
+ 'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
+ 'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
+ 'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
+ 'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
+ 'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
+ 'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
+ 'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')),
+ 'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
+ 'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
+ 'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
+ 'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
+ 'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
+ 'JMESPathLexer': ('pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
+ 'JSLTLexer': ('pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
+ 'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
+ 'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
+ 'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
+ 'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
+ 'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
+ 'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
+ 'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
+ 'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
+ 'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
+ 'JavascriptUL4Lexer': ('pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
+ 'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
+ 'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
+ 'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', (), (), ()),
+ 'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
+ 'JsonLexer': ('pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
+ 'JsonnetLexer': ('pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
+ 'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
+ 'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
+ 'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
+ 'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
+ 'KLexer': ('pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
+ 'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
+ 'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
+ 'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
+ 'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
+ 'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
+ 'KuinLexer': ('pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
+ 'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
+ 'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
+ 'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
+ 'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
+ 'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
+ 'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
+ 'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
+ 'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
+ 'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
+ 'LilyPondLexer': ('pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
+ 'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
+ 'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
+ 'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
+ 'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
+ 'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
+ 'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
+ 'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
+ 'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
+ 'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
+ 'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
+ 'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
+ 'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
+ 'MCFunctionLexer': ('pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
+ 'MCSchemaLexer': ('pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
+ 'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
+ 'MIPSLexer': ('pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
+ 'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
+ 'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
+ 'Macaulay2Lexer': ('pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
+ 'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
+ 'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
+ 'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
+ 'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
+ 'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
+ 'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
+ 'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
+ 'MarkdownLexer': ('pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
+ 'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
+ 'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
+ 'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
+ 'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
+ 'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
+ 'MaximaLexer': ('pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
+ 'MesonLexer': ('pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
+ 'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
+ 'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
+ 'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
+ 'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
+ 'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
+ 'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
+ 'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
+ 'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
+ 'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
+ 'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
+ 'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
+ 'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
+ 'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
+ 'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
+ 'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
+ 'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
+ 'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
+ 'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
+ 'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
+ 'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
+ 'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
+ 'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
+ 'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
+ 'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
+ 'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
+ 'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
+ 'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
+ 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
+ 'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
+ 'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
+ 'NestedTextLexer': ('pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
+ 'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
+ 'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
+ 'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
+ 'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
+ 'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
+ 'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
+ 'NodeConsoleLexer': ('pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
+ 'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
+ 'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
+ 'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
+ 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
+ 'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
+ 'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
+ 'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
+ 'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
+ 'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
+ 'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
+ 'OmgIdlLexer': ('pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
+ 'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
+ 'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
+ 'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
+ 'OutputLexer': ('pygments.lexers.special', 'Text output', ('output',), (), ()),
+ 'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
+ 'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
+ 'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
+ 'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
+ 'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
+ 'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
+ 'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
+ 'PhixLexer': ('pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
+ 'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
+ 'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
+ 'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
+ 'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
+ 'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
+ 'PointlessLexer': ('pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
+ 'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
+ 'PortugolLexer': ('pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
+ 'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
+ 'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
+ 'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
+ 'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
+ 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
+ 'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
+ 'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
+ 'ProcfileLexer': ('pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
+ 'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
+ 'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
+ 'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
+ 'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
+ 'PsyshConsoleLexer': ('pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
+ 'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
+ 'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
+ 'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
+ 'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
+ 'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
+ 'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
+ 'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
+ 'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
+ 'PythonUL4Lexer': ('pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
+ 'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
+ 'QLexer': ('pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
+ 'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
+ 'QlikLexer': ('pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
+ 'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
+ 'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
+ 'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
+ 'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
+ 'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
+ 'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
+ 'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
+ 'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
+ 'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
+ 'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
+ 'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
+ 'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
+ 'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
+ 'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
+ 'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
+ 'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
+ 'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
+ 'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
+ 'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
+ 'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
+ 'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
+ 'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
+ 'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
+ 'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
+ 'RitaLexer': ('pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
+ 'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
+ 'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
+ 'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
+ 'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
+ 'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
+ 'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
+ 'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
+ 'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
+ 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
+ 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
+ 'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
+ 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
+ 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
+ 'SNBTLexer': ('pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
+ 'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
+ 'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
+ 'SaviLexer': ('pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
+ 'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
+ 'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
+ 'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
+ 'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
+ 'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
+ 'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
+ 'SedLexer': ('pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
+ 'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
+ 'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
+ 'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
+ 'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
+ 'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
+ 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
+ 'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
+ 'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
+ 'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
+ 'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
+ 'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
+ 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
+ 'SmithyLexer': ('pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
+ 'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
+ 'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
+ 'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
+ 'SophiaLexer': ('pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
+ 'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
+ 'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
+ 'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
+ 'SpiceLexer': ('pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
+ 'SqlJinjaLexer': ('pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
+ 'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
+ 'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
+ 'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
+ 'SrcinfoLexer': ('pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
+ 'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
+ 'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
+ 'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
+ 'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
+ 'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
+ 'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
+ 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
+ 'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
+ 'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
+ 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
+ 'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
+ 'TalLexer': ('pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
+ 'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
+ 'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
+ 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
+ 'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
+ 'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
+ 'TealLexer': ('pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
+ 'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
+ 'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
+ 'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
+ 'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
+ 'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
+ 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
+ 'ThingsDBLexer': ('pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
+ 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
+ 'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
+ 'TlbLexer': ('pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
+ 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
+ 'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
+ 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
+ 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
+ 'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
+ 'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
+ 'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
+ 'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
+ 'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
+ 'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
+ 'UL4Lexer': ('pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
+ 'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
+ 'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
+ 'UnixConfigLexer': ('pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
+ 'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
+ 'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
+ 'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
+ 'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
+ 'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
+ 'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
+ 'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
+ 'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
+ 'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
+ 'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
+ 'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
+ 'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
+ 'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
+ 'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
+ 'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
+ 'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
+ 'WatLexer': ('pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
+ 'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
+ 'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
+ 'WoWTocLexer': ('pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
+ 'WrenLexer': ('pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
+ 'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
+ 'XMLUL4Lexer': ('pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
+ 'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
+ 'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
+ 'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
+ 'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
+ 'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
+ 'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
+ 'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
+ 'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
+ 'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
+ 'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
+ 'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
+ 'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
+ 'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
+ 'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
+ 'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
+ 'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
+ 'apdlexer': ('pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
+}
diff --git a/pygments/lexers/_mql_builtins.py b/pygments/lexers/_mql_builtins.py
new file mode 100644
index 0000000..3af29eb
--- /dev/null
+++ b/pygments/lexers/_mql_builtins.py
@@ -0,0 +1,1171 @@
+"""
+ pygments.lexers._mql_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builtins for the MqlLexer.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+types = (
+ 'AccountBalance',
+ 'AccountCompany',
+ 'AccountCredit',
+ 'AccountCurrency',
+ 'AccountEquity',
+ 'AccountFreeMarginCheck',
+ 'AccountFreeMarginMode',
+ 'AccountFreeMargin',
+ 'AccountInfoDouble',
+ 'AccountInfoInteger',
+ 'AccountInfoString',
+ 'AccountLeverage',
+ 'AccountMargin',
+ 'AccountName',
+ 'AccountNumber',
+ 'AccountProfit',
+ 'AccountServer',
+ 'AccountStopoutLevel',
+ 'AccountStopoutMode',
+ 'Alert',
+ 'ArrayBsearch',
+ 'ArrayCompare',
+ 'ArrayCopyRates',
+ 'ArrayCopySeries',
+ 'ArrayCopy',
+ 'ArrayDimension',
+ 'ArrayFill',
+ 'ArrayFree',
+ 'ArrayGetAsSeries',
+ 'ArrayInitialize',
+ 'ArrayIsDynamic',
+ 'ArrayIsSeries',
+ 'ArrayMaximum',
+ 'ArrayMinimum',
+ 'ArrayRange',
+ 'ArrayResize',
+ 'ArraySetAsSeries',
+ 'ArraySize',
+ 'ArraySort',
+ 'CharArrayToString',
+ 'CharToString',
+ 'CharToStr',
+ 'CheckPointer',
+ 'ColorToARGB',
+ 'ColorToString',
+ 'Comment',
+ 'CopyClose',
+ 'CopyHigh',
+ 'CopyLow',
+ 'CopyOpen',
+ 'CopyRates',
+ 'CopyRealVolume',
+ 'CopySpread',
+ 'CopyTickVolume',
+ 'CopyTime',
+ 'DayOfWeek',
+ 'DayOfYear',
+ 'Day',
+ 'DebugBreak',
+ 'Digits',
+ 'DoubleToString',
+ 'DoubleToStr',
+ 'EnumToString',
+ 'EventChartCustom',
+ 'EventKillTimer',
+ 'EventSetMillisecondTimer',
+ 'EventSetTimer',
+ 'ExpertRemove',
+ 'FileClose',
+ 'FileCopy',
+ 'FileDelete',
+ 'FileFindClose',
+ 'FileFindFirst',
+ 'FileFindNext',
+ 'FileFlush',
+ 'FileGetInteger',
+ 'FileIsEnding',
+ 'FileIsExist',
+ 'FileIsLineEnding',
+ 'FileMove',
+ 'FileOpenHistory',
+ 'FileOpen',
+ 'FileReadArray',
+ 'FileReadBool',
+ 'FileReadDatetime',
+ 'FileReadDouble',
+ 'FileReadFloat',
+ 'FileReadInteger',
+ 'FileReadLong',
+ 'FileReadNumber',
+ 'FileReadString',
+ 'FileReadStruct',
+ 'FileSeek',
+ 'FileSize',
+ 'FileTell',
+ 'FileWriteArray',
+ 'FileWriteDouble',
+ 'FileWriteFloat',
+ 'FileWriteInteger',
+ 'FileWriteLong',
+ 'FileWriteString',
+ 'FileWriteStruct',
+ 'FileWrite',
+ 'FolderClean',
+ 'FolderCreate',
+ 'FolderDelete',
+ 'GetLastError',
+ 'GetPointer',
+ 'GetTickCount',
+ 'GlobalVariableCheck',
+ 'GlobalVariableDel',
+ 'GlobalVariableGet',
+ 'GlobalVariableName',
+ 'GlobalVariableSetOnCondition',
+ 'GlobalVariableSet',
+ 'GlobalVariableTemp',
+ 'GlobalVariableTime',
+ 'GlobalVariablesDeleteAll',
+ 'GlobalVariablesFlush',
+ 'GlobalVariablesTotal',
+ 'HideTestIndicators',
+ 'Hour',
+ 'IndicatorBuffers',
+ 'IndicatorCounted',
+ 'IndicatorDigits',
+ 'IndicatorSetDouble',
+ 'IndicatorSetInteger',
+ 'IndicatorSetString',
+ 'IndicatorShortName',
+ 'IntegerToString',
+ 'IsConnected',
+ 'IsDemo',
+ 'IsDllsAllowed',
+ 'IsExpertEnabled',
+ 'IsLibrariesAllowed',
+ 'IsOptimization',
+ 'IsStopped',
+ 'IsTesting',
+ 'IsTradeAllowed',
+ 'IsTradeContextBusy',
+ 'IsVisualMode',
+ 'MQLInfoInteger',
+ 'MQLInfoString',
+ 'MarketInfo',
+ 'MathAbs',
+ 'MathArccos',
+ 'MathArcsin',
+ 'MathArctan',
+ 'MathCeil',
+ 'MathCos',
+ 'MathExp',
+ 'MathFloor',
+ 'MathIsValidNumber',
+ 'MathLog',
+ 'MathMax',
+ 'MathMin',
+ 'MathMod',
+ 'MathPow',
+ 'MathRand',
+ 'MathRound',
+ 'MathSin',
+ 'MathSqrt',
+ 'MathSrand',
+ 'MathTan',
+ 'MessageBox',
+ 'Minute',
+ 'Month',
+ 'NormalizeDouble',
+ 'ObjectCreate',
+ 'ObjectDelete',
+ 'ObjectDescription',
+ 'ObjectFind',
+ 'ObjectGetDouble',
+ 'ObjectGetFiboDescription',
+ 'ObjectGetInteger',
+ 'ObjectGetShiftByValue',
+ 'ObjectGetString',
+ 'ObjectGetTimeByValue',
+ 'ObjectGetValueByShift',
+ 'ObjectGetValueByTime',
+ 'ObjectGet',
+ 'ObjectMove',
+ 'ObjectName',
+ 'ObjectSetDouble',
+ 'ObjectSetFiboDescription',
+ 'ObjectSetInteger',
+ 'ObjectSetString',
+ 'ObjectSetText',
+ 'ObjectSet',
+ 'ObjectType',
+ 'ObjectsDeleteAll',
+ 'ObjectsTotal',
+ 'OrderCloseBy',
+ 'OrderClosePrice',
+ 'OrderCloseTime',
+ 'OrderClose',
+ 'OrderComment',
+ 'OrderCommission',
+ 'OrderDelete',
+ 'OrderExpiration',
+ 'OrderLots',
+ 'OrderMagicNumber',
+ 'OrderModify',
+ 'OrderOpenPrice',
+ 'OrderOpenTime',
+ 'OrderPrint',
+ 'OrderProfit',
+ 'OrderSelect',
+ 'OrderSend',
+ 'OrderStopLoss',
+ 'OrderSwap',
+ 'OrderSymbol',
+ 'OrderTakeProfit',
+ 'OrderTicket',
+ 'OrderType',
+ 'OrdersHistoryTotal',
+ 'OrdersTotal',
+ 'PeriodSeconds',
+ 'Period',
+ 'PlaySound',
+ 'Point',
+ 'PrintFormat',
+ 'Print',
+ 'RefreshRates',
+ 'ResetLastError',
+ 'ResourceCreate',
+ 'ResourceFree',
+ 'ResourceReadImage',
+ 'ResourceSave',
+ 'Seconds',
+ 'SendFTP',
+ 'SendMail',
+ 'SendNotification',
+ 'SeriesInfoInteger',
+ 'SetIndexArrow',
+ 'SetIndexBuffer',
+ 'SetIndexDrawBegin',
+ 'SetIndexEmptyValue',
+ 'SetIndexLabel',
+ 'SetIndexShift',
+ 'SetIndexStyle',
+ 'SetLevelStyle',
+ 'SetLevelValue',
+ 'ShortArrayToString',
+ 'ShortToString',
+ 'Sleep',
+ 'StrToDouble',
+ 'StrToInteger',
+ 'StrToTime',
+ 'StringAdd',
+ 'StringBufferLen',
+ 'StringCompare',
+ 'StringConcatenate',
+ 'StringFill',
+ 'StringFind',
+ 'StringFormat',
+ 'StringGetCharacter',
+ 'StringGetChar',
+ 'StringInit',
+ 'StringLen',
+ 'StringReplace',
+ 'StringSetCharacter',
+ 'StringSetChar',
+ 'StringSplit',
+ 'StringSubstr',
+ 'StringToCharArray',
+ 'StringToColor',
+ 'StringToDouble',
+ 'StringToInteger',
+ 'StringToLower',
+ 'StringToShortArray',
+ 'StringToTime',
+ 'StringToUpper',
+ 'StringTrimLeft',
+ 'StringTrimRight',
+ 'StructToTime',
+ 'SymbolInfoDouble',
+ 'SymbolInfoInteger',
+ 'SymbolInfoSessionQuote',
+ 'SymbolInfoSessionTrade',
+ 'SymbolInfoString',
+ 'SymbolInfoTick',
+ 'SymbolIsSynchronized',
+ 'SymbolName',
+ 'SymbolSelect',
+ 'SymbolsTotal',
+ 'Symbol',
+ 'TerminalClose',
+ 'TerminalCompany',
+ 'TerminalName',
+ 'TerminalPath',
+ 'TesterStatistics',
+ 'TextGetSize',
+ 'TextOut',
+ 'TextSetFont',
+ 'TimeCurrent',
+ 'TimeDayOfWeek',
+ 'TimeDayOfYear',
+ 'TimeDaylightSavings',
+ 'TimeDay',
+ 'TimeGMTOffset',
+ 'TimeGMT',
+ 'TimeHour',
+ 'TimeLocal',
+ 'TimeMinute',
+ 'TimeMonth',
+ 'TimeSeconds',
+ 'TimeToString',
+ 'TimeToStruct',
+ 'TimeToStr',
+ 'TimeTradeServer',
+ 'TimeYear',
+ 'UninitializeReason',
+ 'WindowBarsPerChart',
+ 'WindowExpertName',
+ 'WindowFind',
+ 'WindowFirstVisibleBar',
+ 'WindowHandle',
+ 'WindowIsVisible',
+ 'WindowOnDropped',
+ 'WindowPriceMax',
+ 'WindowPriceMin',
+ 'WindowPriceOnDropped',
+ 'WindowRedraw',
+ 'WindowScreenShot',
+ 'WindowTimeOnDropped',
+ 'WindowXOnDropped',
+ 'WindowYOnDropped',
+ 'WindowsTotal',
+ 'Year',
+ 'ZeroMemory',
+ 'iAC',
+ 'iADX',
+ 'iAD',
+ 'iAO',
+ 'iATR',
+ 'iAlligator',
+ 'iBWMFI',
+ 'iBandsOnArray',
+ 'iBands',
+ 'iBarShift',
+ 'iBars',
+ 'iBearsPower',
+ 'iBullsPower',
+ 'iCCIOnArray',
+ 'iCCI',
+ 'iClose',
+ 'iCustom',
+ 'iDeMarker',
+ 'iEnvelopesOnArray',
+ 'iEnvelopes',
+ 'iForce',
+ 'iFractals',
+ 'iGator',
+ 'iHighest',
+ 'iHigh',
+ 'iIchimoku',
+ 'iLowest',
+ 'iLow',
+ 'iMACD',
+ 'iMAOnArray',
+ 'iMA',
+ 'iMFI',
+ 'iMomentumOnArray',
+ 'iMomentum',
+ 'iOBV',
+ 'iOpen',
+ 'iOsMA',
+ 'iRSIOnArray',
+ 'iRSI',
+ 'iRVI',
+ 'iSAR',
+ 'iStdDevOnArray',
+ 'iStdDev',
+ 'iStochastic',
+ 'iTime',
+ 'iVolume',
+ 'iWPR',
+)
+
+constants = (
+ 'ACCOUNT_BALANCE',
+ 'ACCOUNT_COMPANY',
+ 'ACCOUNT_CREDIT',
+ 'ACCOUNT_CURRENCY',
+ 'ACCOUNT_EQUITY',
+ 'ACCOUNT_FREEMARGIN',
+ 'ACCOUNT_LEVERAGE',
+ 'ACCOUNT_LIMIT_ORDERS',
+ 'ACCOUNT_LOGIN',
+ 'ACCOUNT_MARGIN',
+ 'ACCOUNT_MARGIN_LEVEL',
+ 'ACCOUNT_MARGIN_SO_CALL',
+ 'ACCOUNT_MARGIN_SO_MODE',
+ 'ACCOUNT_MARGIN_SO_SO',
+ 'ACCOUNT_NAME',
+ 'ACCOUNT_PROFIT',
+ 'ACCOUNT_SERVER',
+ 'ACCOUNT_STOPOUT_MODE_MONEY',
+ 'ACCOUNT_STOPOUT_MODE_PERCENT',
+ 'ACCOUNT_TRADE_ALLOWED',
+ 'ACCOUNT_TRADE_EXPERT',
+ 'ACCOUNT_TRADE_MODE',
+ 'ACCOUNT_TRADE_MODE_CONTEST',
+ 'ACCOUNT_TRADE_MODE_DEMO',
+ 'ACCOUNT_TRADE_MODE_REAL',
+ 'ALIGN_CENTER',
+ 'ALIGN_LEFT',
+ 'ALIGN_RIGHT',
+ 'ANCHOR_BOTTOM',
+ 'ANCHOR_CENTER',
+ 'ANCHOR_LEFT',
+ 'ANCHOR_LEFT_LOWER',
+ 'ANCHOR_LEFT_UPPER',
+ 'ANCHOR_LOWER',
+ 'ANCHOR_RIGHT',
+ 'ANCHOR_RIGHT_LOWER',
+ 'ANCHOR_RIGHT_UPPER',
+ 'ANCHOR_TOP',
+ 'ANCHOR_UPPER',
+ 'BORDER_FLAT',
+ 'BORDER_RAISED',
+ 'BORDER_SUNKEN',
+ 'CHARTEVENT_CHART_CHANGE',
+ 'CHARTEVENT_CLICK',
+ 'CHARTEVENT_CUSTOM',
+ 'CHARTEVENT_CUSTOM_LAST',
+ 'CHARTEVENT_KEYDOWN',
+ 'CHARTEVENT_MOUSE_MOVE',
+ 'CHARTEVENT_OBJECT_CHANGE',
+ 'CHARTEVENT_OBJECT_CLICK',
+ 'CHARTEVENT_OBJECT_CREATE',
+ 'CHARTEVENT_OBJECT_DELETE',
+ 'CHARTEVENT_OBJECT_DRAG',
+ 'CHARTEVENT_OBJECT_ENDEDIT',
+ 'CHARTS_MAX',
+ 'CHART_AUTOSCROLL',
+ 'CHART_BARS',
+ 'CHART_BEGIN',
+ 'CHART_BRING_TO_TOP',
+ 'CHART_CANDLES',
+ 'CHART_COLOR_ASK',
+ 'CHART_COLOR_BACKGROUND',
+ 'CHART_COLOR_BID',
+ 'CHART_COLOR_CANDLE_BEAR',
+ 'CHART_COLOR_CANDLE_BULL',
+ 'CHART_COLOR_CHART_DOWN',
+ 'CHART_COLOR_CHART_LINE',
+ 'CHART_COLOR_CHART_UP',
+ 'CHART_COLOR_FOREGROUND',
+ 'CHART_COLOR_GRID',
+ 'CHART_COLOR_LAST',
+ 'CHART_COLOR_STOP_LEVEL',
+ 'CHART_COLOR_VOLUME',
+ 'CHART_COMMENT',
+ 'CHART_CURRENT_POS',
+ 'CHART_DRAG_TRADE_LEVELS',
+ 'CHART_END',
+ 'CHART_EVENT_MOUSE_MOVE',
+ 'CHART_EVENT_OBJECT_CREATE',
+ 'CHART_EVENT_OBJECT_DELETE',
+ 'CHART_FIRST_VISIBLE_BAR',
+ 'CHART_FIXED_MAX',
+ 'CHART_FIXED_MIN',
+ 'CHART_FIXED_POSITION',
+ 'CHART_FOREGROUND',
+ 'CHART_HEIGHT_IN_PIXELS',
+ 'CHART_IS_OBJECT',
+ 'CHART_LINE',
+ 'CHART_MODE',
+ 'CHART_MOUSE_SCROLL',
+ 'CHART_POINTS_PER_BAR',
+ 'CHART_PRICE_MAX',
+ 'CHART_PRICE_MIN',
+ 'CHART_SCALEFIX',
+ 'CHART_SCALEFIX_11',
+ 'CHART_SCALE',
+ 'CHART_SCALE_PT_PER_BAR',
+ 'CHART_SHIFT',
+ 'CHART_SHIFT_SIZE',
+ 'CHART_SHOW_ASK_LINE',
+ 'CHART_SHOW_BID_LINE',
+ 'CHART_SHOW_DATE_SCALE',
+ 'CHART_SHOW_GRID',
+ 'CHART_SHOW_LAST_LINE',
+ 'CHART_SHOW_OBJECT_DESCR',
+ 'CHART_SHOW_OHLC',
+ 'CHART_SHOW_PERIOD_SEP',
+ 'CHART_SHOW_PRICE_SCALE',
+ 'CHART_SHOW_TRADE_LEVELS',
+ 'CHART_SHOW_VOLUMES',
+ 'CHART_VISIBLE_BARS',
+ 'CHART_VOLUME_HIDE',
+ 'CHART_VOLUME_REAL',
+ 'CHART_VOLUME_TICK',
+ 'CHART_WIDTH_IN_BARS',
+ 'CHART_WIDTH_IN_PIXELS',
+ 'CHART_WINDOWS_TOTAL',
+ 'CHART_WINDOW_HANDLE',
+ 'CHART_WINDOW_IS_VISIBLE',
+ 'CHART_WINDOW_YDISTANCE',
+ 'CHAR_MAX',
+ 'CHAR_MIN',
+ 'CLR_NONE',
+ 'CORNER_LEFT_LOWER',
+ 'CORNER_LEFT_UPPER',
+ 'CORNER_RIGHT_LOWER',
+ 'CORNER_RIGHT_UPPER',
+ 'CP_ACP',
+ 'CP_MACCP',
+ 'CP_OEMCP',
+ 'CP_SYMBOL',
+ 'CP_THREAD_ACP',
+ 'CP_UTF7',
+ 'CP_UTF8',
+ 'DBL_DIG',
+ 'DBL_EPSILON',
+ 'DBL_MANT_DIG',
+ 'DBL_MAX',
+ 'DBL_MAX_10_EXP',
+ 'DBL_MAX_EXP',
+ 'DBL_MIN',
+ 'DBL_MIN_10_EXP',
+ 'DBL_MIN_EXP',
+ 'DRAW_ARROW',
+ 'DRAW_FILLING',
+ 'DRAW_HISTOGRAM',
+ 'DRAW_LINE',
+ 'DRAW_NONE',
+ 'DRAW_SECTION',
+ 'DRAW_ZIGZAG',
+ 'EMPTY',
+ 'EMPTY_VALUE',
+ 'ERR_ACCOUNT_DISABLED',
+ 'ERR_BROKER_BUSY',
+ 'ERR_COMMON_ERROR',
+ 'ERR_INVALID_ACCOUNT',
+ 'ERR_INVALID_PRICE',
+ 'ERR_INVALID_STOPS',
+ 'ERR_INVALID_TRADE_PARAMETERS',
+ 'ERR_INVALID_TRADE_VOLUME',
+ 'ERR_LONG_POSITIONS_ONLY_ALLOWED',
+ 'ERR_MALFUNCTIONAL_TRADE',
+ 'ERR_MARKET_CLOSED',
+ 'ERR_NOT_ENOUGH_MONEY',
+ 'ERR_NOT_ENOUGH_RIGHTS',
+ 'ERR_NO_CONNECTION',
+ 'ERR_NO_ERROR',
+ 'ERR_NO_RESULT',
+ 'ERR_OFF_QUOTES',
+ 'ERR_OLD_VERSION',
+ 'ERR_ORDER_LOCKED',
+ 'ERR_PRICE_CHANGED',
+ 'ERR_REQUOTE',
+ 'ERR_SERVER_BUSY',
+ 'ERR_TOO_FREQUENT_REQUESTS',
+ 'ERR_TOO_MANY_REQUESTS',
+ 'ERR_TRADE_CONTEXT_BUSY',
+ 'ERR_TRADE_DISABLED',
+ 'ERR_TRADE_EXPIRATION_DENIED',
+ 'ERR_TRADE_HEDGE_PROHIBITED',
+ 'ERR_TRADE_MODIFY_DENIED',
+ 'ERR_TRADE_PROHIBITED_BY_FIFO',
+ 'ERR_TRADE_TIMEOUT',
+ 'ERR_TRADE_TOO_MANY_ORDERS',
+ 'FILE_ACCESS_DATE',
+ 'FILE_ANSI',
+ 'FILE_BIN',
+ 'FILE_COMMON',
+ 'FILE_CREATE_DATE',
+ 'FILE_CSV',
+ 'FILE_END',
+ 'FILE_EXISTS',
+ 'FILE_IS_ANSI',
+ 'FILE_IS_BINARY',
+ 'FILE_IS_COMMON',
+ 'FILE_IS_CSV',
+ 'FILE_IS_READABLE',
+ 'FILE_IS_TEXT',
+ 'FILE_IS_WRITABLE',
+ 'FILE_LINE_END',
+ 'FILE_MODIFY_DATE',
+ 'FILE_POSITION',
+ 'FILE_READ',
+ 'FILE_REWRITE',
+ 'FILE_SHARE_READ',
+ 'FILE_SHARE_WRITE',
+ 'FILE_SIZE',
+ 'FILE_TXT',
+ 'FILE_UNICODE',
+ 'FILE_WRITE',
+ 'FLT_DIG',
+ 'FLT_EPSILON',
+ 'FLT_MANT_DIG',
+ 'FLT_MAX',
+ 'FLT_MAX_10_EXP',
+ 'FLT_MAX_EXP',
+ 'FLT_MIN',
+ 'FLT_MIN_10_EXP',
+ 'FLT_MIN_EXP',
+ 'FRIDAY',
+ 'GANN_DOWN_TREND',
+ 'GANN_UP_TREND',
+ 'IDABORT',
+ 'IDCANCEL',
+ 'IDCONTINUE',
+ 'IDIGNORE',
+ 'IDNO',
+ 'IDOK',
+ 'IDRETRY',
+ 'IDTRYAGAIN',
+ 'IDYES',
+ 'INDICATOR_CALCULATIONS',
+ 'INDICATOR_COLOR_INDEX',
+ 'INDICATOR_DATA',
+ 'INDICATOR_DIGITS',
+ 'INDICATOR_HEIGHT',
+ 'INDICATOR_LEVELCOLOR',
+ 'INDICATOR_LEVELSTYLE',
+ 'INDICATOR_LEVELS',
+ 'INDICATOR_LEVELTEXT',
+ 'INDICATOR_LEVELVALUE',
+ 'INDICATOR_LEVELWIDTH',
+ 'INDICATOR_MAXIMUM',
+ 'INDICATOR_MINIMUM',
+ 'INDICATOR_SHORTNAME',
+ 'INT_MAX',
+ 'INT_MIN',
+ 'INVALID_HANDLE',
+ 'IS_DEBUG_MODE',
+ 'IS_PROFILE_MODE',
+ 'LICENSE_DEMO',
+ 'LICENSE_FREE',
+ 'LICENSE_FULL',
+ 'LICENSE_TIME',
+ 'LONG_MAX',
+ 'LONG_MIN',
+ 'MB_ABORTRETRYIGNORE',
+ 'MB_CANCELTRYCONTINUE',
+ 'MB_DEFBUTTON1',
+ 'MB_DEFBUTTON2',
+ 'MB_DEFBUTTON3',
+ 'MB_DEFBUTTON4',
+ 'MB_ICONASTERISK',
+ 'MB_ICONERROR',
+ 'MB_ICONEXCLAMATION',
+ 'MB_ICONHAND',
+ 'MB_ICONINFORMATION',
+ 'MB_ICONQUESTION',
+ 'MB_ICONSTOP',
+ 'MB_ICONWARNING',
+ 'MB_OKCANCEL',
+ 'MB_OK',
+ 'MB_RETRYCANCEL',
+ 'MB_YESNOCANCEL',
+ 'MB_YESNO',
+ 'MODE_ASK',
+ 'MODE_BID',
+ 'MODE_CHINKOUSPAN',
+ 'MODE_CLOSE',
+ 'MODE_DIGITS',
+ 'MODE_EMA',
+ 'MODE_EXPIRATION',
+ 'MODE_FREEZELEVEL',
+ 'MODE_GATORJAW',
+ 'MODE_GATORLIPS',
+ 'MODE_GATORTEETH',
+ 'MODE_HIGH',
+ 'MODE_KIJUNSEN',
+ 'MODE_LOTSIZE',
+ 'MODE_LOTSTEP',
+ 'MODE_LOWER',
+ 'MODE_LOW',
+ 'MODE_LWMA',
+ 'MODE_MAIN',
+ 'MODE_MARGINCALCMODE',
+ 'MODE_MARGINHEDGED',
+ 'MODE_MARGININIT',
+ 'MODE_MARGINMAINTENANCE',
+ 'MODE_MARGINREQUIRED',
+ 'MODE_MAXLOT',
+ 'MODE_MINLOT',
+ 'MODE_MINUSDI',
+ 'MODE_OPEN',
+ 'MODE_PLUSDI',
+ 'MODE_POINT',
+ 'MODE_PROFITCALCMODE',
+ 'MODE_SENKOUSPANA',
+ 'MODE_SENKOUSPANB',
+ 'MODE_SIGNAL',
+ 'MODE_SMA',
+ 'MODE_SMMA',
+ 'MODE_SPREAD',
+ 'MODE_STARTING',
+ 'MODE_STOPLEVEL',
+ 'MODE_SWAPLONG',
+ 'MODE_SWAPSHORT',
+ 'MODE_SWAPTYPE',
+ 'MODE_TENKANSEN',
+ 'MODE_TICKSIZE',
+ 'MODE_TICKVALUE',
+ 'MODE_TIME',
+ 'MODE_TRADEALLOWED',
+ 'MODE_UPPER',
+ 'MODE_VOLUME',
+ 'MONDAY',
+ 'MQL_DEBUG',
+ 'MQL_DLLS_ALLOWED',
+ 'MQL_FRAME_MODE',
+ 'MQL_LICENSE_TYPE',
+ 'MQL_OPTIMIZATION',
+ 'MQL_PROFILER',
+ 'MQL_PROGRAM_NAME',
+ 'MQL_PROGRAM_PATH',
+ 'MQL_PROGRAM_TYPE',
+ 'MQL_TESTER',
+ 'MQL_TRADE_ALLOWED',
+ 'MQL_VISUAL_MODE',
+ 'M_1_PI',
+ 'M_2_PI',
+ 'M_2_SQRTPI',
+ 'M_E',
+ 'M_LN2',
+ 'M_LN10',
+ 'M_LOG2E',
+ 'M_LOG10E',
+ 'M_PI',
+ 'M_PI_2',
+ 'M_PI_4',
+ 'M_SQRT1_2',
+ 'M_SQRT2',
+ 'NULL',
+ 'OBJPROP_ALIGN',
+ 'OBJPROP_ANCHOR',
+ 'OBJPROP_ANGLE',
+ 'OBJPROP_ARROWCODE',
+ 'OBJPROP_BACK',
+ 'OBJPROP_BGCOLOR',
+ 'OBJPROP_BMPFILE',
+ 'OBJPROP_BORDER_COLOR',
+ 'OBJPROP_BORDER_TYPE',
+ 'OBJPROP_CHART_ID',
+ 'OBJPROP_CHART_SCALE',
+ 'OBJPROP_COLOR',
+ 'OBJPROP_CORNER',
+ 'OBJPROP_CREATETIME',
+ 'OBJPROP_DATE_SCALE',
+ 'OBJPROP_DEVIATION',
+ 'OBJPROP_DRAWLINES',
+ 'OBJPROP_ELLIPSE',
+ 'OBJPROP_FIBOLEVELS',
+ 'OBJPROP_FILL',
+ 'OBJPROP_FIRSTLEVEL',
+ 'OBJPROP_FONTSIZE',
+ 'OBJPROP_FONT',
+ 'OBJPROP_HIDDEN',
+ 'OBJPROP_LEVELCOLOR',
+ 'OBJPROP_LEVELSTYLE',
+ 'OBJPROP_LEVELS',
+ 'OBJPROP_LEVELTEXT',
+ 'OBJPROP_LEVELVALUE',
+ 'OBJPROP_LEVELWIDTH',
+ 'OBJPROP_NAME',
+ 'OBJPROP_PERIOD',
+ 'OBJPROP_PRICE1',
+ 'OBJPROP_PRICE2',
+ 'OBJPROP_PRICE3',
+ 'OBJPROP_PRICE',
+ 'OBJPROP_PRICE_SCALE',
+ 'OBJPROP_RAY',
+ 'OBJPROP_RAY_RIGHT',
+ 'OBJPROP_READONLY',
+ 'OBJPROP_SCALE',
+ 'OBJPROP_SELECTABLE',
+ 'OBJPROP_SELECTED',
+ 'OBJPROP_STATE',
+ 'OBJPROP_STYLE',
+ 'OBJPROP_SYMBOL',
+ 'OBJPROP_TEXT',
+ 'OBJPROP_TIME1',
+ 'OBJPROP_TIME2',
+ 'OBJPROP_TIME3',
+ 'OBJPROP_TIMEFRAMES',
+ 'OBJPROP_TIME',
+ 'OBJPROP_TOOLTIP',
+ 'OBJPROP_TYPE',
+ 'OBJPROP_WIDTH',
+ 'OBJPROP_XDISTANCE',
+ 'OBJPROP_XOFFSET',
+ 'OBJPROP_XSIZE',
+ 'OBJPROP_YDISTANCE',
+ 'OBJPROP_YOFFSET',
+ 'OBJPROP_YSIZE',
+ 'OBJPROP_ZORDER',
+ 'OBJ_ALL_PERIODS',
+ 'OBJ_ARROW',
+ 'OBJ_ARROW_BUY',
+ 'OBJ_ARROW_CHECK',
+ 'OBJ_ARROW_DOWN',
+ 'OBJ_ARROW_LEFT_PRICE',
+ 'OBJ_ARROW_RIGHT_PRICE',
+ 'OBJ_ARROW_SELL',
+ 'OBJ_ARROW_STOP',
+ 'OBJ_ARROW_THUMB_DOWN',
+ 'OBJ_ARROW_THUMB_UP',
+ 'OBJ_ARROW_UP',
+ 'OBJ_BITMAP',
+ 'OBJ_BITMAP_LABEL',
+ 'OBJ_BUTTON',
+ 'OBJ_CHANNEL',
+ 'OBJ_CYCLES',
+ 'OBJ_EDIT',
+ 'OBJ_ELLIPSE',
+ 'OBJ_EVENT',
+ 'OBJ_EXPANSION',
+ 'OBJ_FIBOARC',
+ 'OBJ_FIBOCHANNEL',
+ 'OBJ_FIBOFAN',
+ 'OBJ_FIBOTIMES',
+ 'OBJ_FIBO',
+ 'OBJ_GANNFAN',
+ 'OBJ_GANNGRID',
+ 'OBJ_GANNLINE',
+ 'OBJ_HLINE',
+ 'OBJ_LABEL',
+ 'OBJ_NO_PERIODS',
+ 'OBJ_PERIOD_D1',
+ 'OBJ_PERIOD_H1',
+ 'OBJ_PERIOD_H4',
+ 'OBJ_PERIOD_M1',
+ 'OBJ_PERIOD_M5',
+ 'OBJ_PERIOD_M15',
+ 'OBJ_PERIOD_M30',
+ 'OBJ_PERIOD_MN1',
+ 'OBJ_PERIOD_W1',
+ 'OBJ_PITCHFORK',
+ 'OBJ_RECTANGLE',
+ 'OBJ_RECTANGLE_LABEL',
+ 'OBJ_REGRESSION',
+ 'OBJ_STDDEVCHANNEL',
+ 'OBJ_TEXT',
+ 'OBJ_TRENDBYANGLE',
+ 'OBJ_TREND',
+ 'OBJ_TRIANGLE',
+ 'OBJ_VLINE',
+ 'OP_BUYLIMIT',
+ 'OP_BUYSTOP',
+ 'OP_BUY',
+ 'OP_SELLLIMIT',
+ 'OP_SELLSTOP',
+ 'OP_SELL',
+ 'PERIOD_CURRENT',
+ 'PERIOD_D1',
+ 'PERIOD_H1',
+ 'PERIOD_H2',
+ 'PERIOD_H3',
+ 'PERIOD_H4',
+ 'PERIOD_H6',
+ 'PERIOD_H8',
+ 'PERIOD_H12',
+ 'PERIOD_M1',
+ 'PERIOD_M2',
+ 'PERIOD_M3',
+ 'PERIOD_M4',
+ 'PERIOD_M5',
+ 'PERIOD_M6',
+ 'PERIOD_M10',
+ 'PERIOD_M12',
+ 'PERIOD_M15',
+ 'PERIOD_M20',
+ 'PERIOD_M30',
+ 'PERIOD_MN1',
+ 'PERIOD_W1',
+ 'POINTER_AUTOMATIC',
+ 'POINTER_DYNAMIC',
+ 'POINTER_INVALID',
+ 'PRICE_CLOSE',
+ 'PRICE_HIGH',
+ 'PRICE_LOW',
+ 'PRICE_MEDIAN',
+ 'PRICE_OPEN',
+ 'PRICE_TYPICAL',
+ 'PRICE_WEIGHTED',
+ 'PROGRAM_EXPERT',
+ 'PROGRAM_INDICATOR',
+ 'PROGRAM_SCRIPT',
+ 'REASON_ACCOUNT',
+ 'REASON_CHARTCHANGE',
+ 'REASON_CHARTCLOSE',
+ 'REASON_CLOSE',
+ 'REASON_INITFAILED',
+ 'REASON_PARAMETERS',
+ 'REASON_PROGRAM',
+ 'REASON_RECOMPILE',
+ 'REASON_REMOVE',
+ 'REASON_TEMPLATE',
+ 'SATURDAY',
+ 'SEEK_CUR',
+ 'SEEK_END',
+ 'SEEK_SET',
+ 'SERIES_BARS_COUNT',
+ 'SERIES_FIRSTDATE',
+ 'SERIES_LASTBAR_DATE',
+ 'SERIES_SERVER_FIRSTDATE',
+ 'SERIES_SYNCHRONIZED',
+ 'SERIES_TERMINAL_FIRSTDATE',
+ 'SHORT_MAX',
+ 'SHORT_MIN',
+ 'STAT_BALANCEDD_PERCENT',
+ 'STAT_BALANCEMIN',
+ 'STAT_BALANCE_DDREL_PERCENT',
+ 'STAT_BALANCE_DD',
+ 'STAT_BALANCE_DD_RELATIVE',
+ 'STAT_CONLOSSMAX',
+ 'STAT_CONLOSSMAX_TRADES',
+ 'STAT_CONPROFITMAX',
+ 'STAT_CONPROFITMAX_TRADES',
+ 'STAT_CUSTOM_ONTESTER',
+ 'STAT_DEALS',
+ 'STAT_EQUITYDD_PERCENT',
+ 'STAT_EQUITYMIN',
+ 'STAT_EQUITY_DDREL_PERCENT',
+ 'STAT_EQUITY_DD',
+ 'STAT_EQUITY_DD_RELATIVE',
+ 'STAT_EXPECTED_PAYOFF',
+ 'STAT_GROSS_LOSS',
+ 'STAT_GROSS_PROFIT',
+ 'STAT_INITIAL_DEPOSIT',
+ 'STAT_LONG_TRADES',
+ 'STAT_LOSSTRADES_AVGCON',
+ 'STAT_LOSS_TRADES',
+ 'STAT_MAX_CONLOSSES',
+ 'STAT_MAX_CONLOSS_TRADES',
+ 'STAT_MAX_CONPROFIT_TRADES',
+ 'STAT_MAX_CONWINS',
+ 'STAT_MAX_LOSSTRADE',
+ 'STAT_MAX_PROFITTRADE',
+ 'STAT_MIN_MARGINLEVEL',
+ 'STAT_PROFITTRADES_AVGCON',
+ 'STAT_PROFIT',
+ 'STAT_PROFIT_FACTOR',
+ 'STAT_PROFIT_LONGTRADES',
+ 'STAT_PROFIT_SHORTTRADES',
+ 'STAT_PROFIT_TRADES',
+ 'STAT_RECOVERY_FACTOR',
+ 'STAT_SHARPE_RATIO',
+ 'STAT_SHORT_TRADES',
+ 'STAT_TRADES',
+ 'STAT_WITHDRAWAL',
+ 'STO_CLOSECLOSE',
+ 'STO_LOWHIGH',
+ 'STYLE_DASHDOTDOT',
+ 'STYLE_DASHDOT',
+ 'STYLE_DASH',
+ 'STYLE_DOT',
+ 'STYLE_SOLID',
+ 'SUNDAY',
+ 'SYMBOL_ARROWDOWN',
+ 'SYMBOL_ARROWUP',
+ 'SYMBOL_CHECKSIGN',
+ 'SYMBOL_LEFTPRICE',
+ 'SYMBOL_RIGHTPRICE',
+ 'SYMBOL_STOPSIGN',
+ 'SYMBOL_THUMBSDOWN',
+ 'SYMBOL_THUMBSUP',
+ 'TERMINAL_BUILD',
+ 'TERMINAL_CODEPAGE',
+ 'TERMINAL_COMMONDATA_PATH',
+ 'TERMINAL_COMPANY',
+ 'TERMINAL_CONNECTED',
+ 'TERMINAL_CPU_CORES',
+ 'TERMINAL_DATA_PATH',
+ 'TERMINAL_DISK_SPACE',
+ 'TERMINAL_DLLS_ALLOWED',
+ 'TERMINAL_EMAIL_ENABLED',
+ 'TERMINAL_FTP_ENABLED',
+ 'TERMINAL_LANGUAGE',
+ 'TERMINAL_MAXBARS',
+ 'TERMINAL_MEMORY_AVAILABLE',
+ 'TERMINAL_MEMORY_PHYSICAL',
+ 'TERMINAL_MEMORY_TOTAL',
+ 'TERMINAL_MEMORY_USED',
+ 'TERMINAL_NAME',
+ 'TERMINAL_OPENCL_SUPPORT',
+ 'TERMINAL_PATH',
+ 'TERMINAL_TRADE_ALLOWED',
+ 'TERMINAL_X64',
+ 'THURSDAY',
+ 'TRADE_ACTION_DEAL',
+ 'TRADE_ACTION_MODIFY',
+ 'TRADE_ACTION_PENDING',
+ 'TRADE_ACTION_REMOVE',
+ 'TRADE_ACTION_SLTP',
+ 'TUESDAY',
+ 'UCHAR_MAX',
+ 'UINT_MAX',
+ 'ULONG_MAX',
+ 'USHORT_MAX',
+ 'VOLUME_REAL',
+ 'VOLUME_TICK',
+ 'WEDNESDAY',
+ 'WHOLE_ARRAY',
+ 'WRONG_VALUE',
+ 'clrNONE',
+ '__DATETIME__',
+ '__DATE__',
+ '__FILE__',
+ '__FUNCSIG__',
+ '__FUNCTION__',
+ '__LINE__',
+ '__MQL4BUILD__',
+ '__MQLBUILD__',
+ '__PATH__',
+)
+
+colors = (
+ 'AliceBlue',
+ 'AntiqueWhite',
+ 'Aquamarine',
+ 'Aqua',
+ 'Beige',
+ 'Bisque',
+ 'Black',
+ 'BlanchedAlmond',
+ 'BlueViolet',
+ 'Blue',
+ 'Brown',
+ 'BurlyWood',
+ 'CadetBlue',
+ 'Chartreuse',
+ 'Chocolate',
+ 'Coral',
+ 'CornflowerBlue',
+ 'Cornsilk',
+ 'Crimson',
+ 'DarkBlue',
+ 'DarkGoldenrod',
+ 'DarkGray',
+ 'DarkGreen',
+ 'DarkKhaki',
+ 'DarkOliveGreen',
+ 'DarkOrange',
+ 'DarkOrchid',
+ 'DarkSalmon',
+ 'DarkSeaGreen',
+ 'DarkSlateBlue',
+ 'DarkSlateGray',
+ 'DarkTurquoise',
+ 'DarkViolet',
+ 'DeepPink',
+ 'DeepSkyBlue',
+ 'DimGray',
+ 'DodgerBlue',
+ 'FireBrick',
+ 'ForestGreen',
+ 'Gainsboro',
+ 'Goldenrod',
+ 'Gold',
+ 'Gray',
+ 'GreenYellow',
+ 'Green',
+ 'Honeydew',
+ 'HotPink',
+ 'IndianRed',
+ 'Indigo',
+ 'Ivory',
+ 'Khaki',
+ 'LavenderBlush',
+ 'Lavender',
+ 'LawnGreen',
+ 'LemonChiffon',
+ 'LightBlue',
+ 'LightCoral',
+ 'LightCyan',
+ 'LightGoldenrod',
+ 'LightGray',
+ 'LightGreen',
+ 'LightPink',
+ 'LightSalmon',
+ 'LightSeaGreen',
+ 'LightSkyBlue',
+ 'LightSlateGray',
+ 'LightSteelBlue',
+ 'LightYellow',
+ 'LimeGreen',
+ 'Lime',
+ 'Linen',
+ 'Magenta',
+ 'Maroon',
+ 'MediumAquamarine',
+ 'MediumBlue',
+ 'MediumOrchid',
+ 'MediumPurple',
+ 'MediumSeaGreen',
+ 'MediumSlateBlue',
+ 'MediumSpringGreen',
+ 'MediumTurquoise',
+ 'MediumVioletRed',
+ 'MidnightBlue',
+ 'MintCream',
+ 'MistyRose',
+ 'Moccasin',
+ 'NavajoWhite',
+ 'Navy',
+ 'OldLace',
+ 'OliveDrab',
+ 'Olive',
+ 'OrangeRed',
+ 'Orange',
+ 'Orchid',
+ 'PaleGoldenrod',
+ 'PaleGreen',
+ 'PaleTurquoise',
+ 'PaleVioletRed',
+ 'PapayaWhip',
+ 'PeachPuff',
+ 'Peru',
+ 'Pink',
+ 'Plum',
+ 'PowderBlue',
+ 'Purple',
+ 'Red',
+ 'RosyBrown',
+ 'RoyalBlue',
+ 'SaddleBrown',
+ 'Salmon',
+ 'SandyBrown',
+ 'SeaGreen',
+ 'Seashell',
+ 'Sienna',
+ 'Silver',
+ 'SkyBlue',
+ 'SlateBlue',
+ 'SlateGray',
+ 'Snow',
+ 'SpringGreen',
+ 'SteelBlue',
+ 'Tan',
+ 'Teal',
+ 'Thistle',
+ 'Tomato',
+ 'Turquoise',
+ 'Violet',
+ 'Wheat',
+ 'WhiteSmoke',
+ 'White',
+ 'YellowGreen',
+ 'Yellow',
+)
+
+keywords = (
+ 'input', '_Digits', '_Point', '_LastError', '_Period', '_RandomSeed',
+ '_StopFlag', '_Symbol', '_UninitReason', 'Ask', 'Bars', 'Bid',
+ 'Close', 'Digits', 'High', 'Low', 'Open', 'Point', 'Time',
+ 'Volume',
+)
+c_types = (
+ 'void', 'char', 'uchar', 'bool', 'short', 'ushort', 'int', 'uint',
+ 'color', 'long', 'ulong', 'datetime', 'float', 'double',
+ 'string',
+)
diff --git a/pygments/lexers/_mysql_builtins.py b/pygments/lexers/_mysql_builtins.py
new file mode 100644
index 0000000..d266789
--- /dev/null
+++ b/pygments/lexers/_mysql_builtins.py
@@ -0,0 +1,1335 @@
+"""
+ pygments.lexers._mysql_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Self-updating data files for the MySQL lexer.
+
+ Run with `python -I` to update.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+MYSQL_CONSTANTS = (
+ 'false',
+ 'null',
+ 'true',
+ 'unknown',
+)
+
+
+# At this time, no easily-parsed, definitive list of data types
+# has been found in the MySQL source code or documentation. (The
+# `sql/sql_yacc.yy` file is definitive but is difficult to parse.)
+# Therefore these types are currently maintained manually.
+#
+# Some words in this list -- like "long", "national", "precision",
+# and "varying" -- appear to only occur in combination with other
+# data type keywords. Therefore they are included as separate words
+# even though they do not naturally occur in syntax separately.
+#
+# This list is also used to strip data types out of the list of
+# MySQL keywords, which is automatically updated later in the file.
+#
+MYSQL_DATATYPES = (
+ # Numeric data types
+ 'bigint',
+ 'bit',
+ 'bool',
+ 'boolean',
+ 'dec',
+ 'decimal',
+ 'double',
+ 'fixed',
+ 'float',
+ 'float4',
+ 'float8',
+ 'int',
+ 'int1',
+ 'int2',
+ 'int3',
+ 'int4',
+ 'int8',
+ 'integer',
+ 'mediumint',
+ 'middleint',
+ 'numeric',
+ 'precision',
+ 'real',
+ 'serial',
+ 'smallint',
+ 'tinyint',
+
+ # Date and time data types
+ 'date',
+ 'datetime',
+ 'time',
+ 'timestamp',
+ 'year',
+
+ # String data types
+ 'binary',
+ 'blob',
+ 'char',
+ 'enum',
+ 'long',
+ 'longblob',
+ 'longtext',
+ 'mediumblob',
+ 'mediumtext',
+ 'national',
+ 'nchar',
+ 'nvarchar',
+ 'set',
+ 'text',
+ 'tinyblob',
+ 'tinytext',
+ 'varbinary',
+ 'varchar',
+ 'varcharacter',
+ 'varying',
+
+ # Spatial data types
+ 'geometry',
+ 'geometrycollection',
+ 'linestring',
+ 'multilinestring',
+ 'multipoint',
+ 'multipolygon',
+ 'point',
+ 'polygon',
+
+ # JSON data types
+ 'json',
+)
+
+# Everything below this line is auto-generated from the MySQL source code.
+# Run this file in Python and it will update itself.
+# -----------------------------------------------------------------------------
+
+MYSQL_FUNCTIONS = (
+ 'abs',
+ 'acos',
+ 'adddate',
+ 'addtime',
+ 'aes_decrypt',
+ 'aes_encrypt',
+ 'any_value',
+ 'asin',
+ 'atan',
+ 'atan2',
+ 'benchmark',
+ 'bin',
+ 'bin_to_uuid',
+ 'bit_and',
+ 'bit_count',
+ 'bit_length',
+ 'bit_or',
+ 'bit_xor',
+ 'can_access_column',
+ 'can_access_database',
+ 'can_access_event',
+ 'can_access_resource_group',
+ 'can_access_routine',
+ 'can_access_table',
+ 'can_access_trigger',
+ 'can_access_user',
+ 'can_access_view',
+ 'cast',
+ 'ceil',
+ 'ceiling',
+ 'char_length',
+ 'character_length',
+ 'coercibility',
+ 'compress',
+ 'concat',
+ 'concat_ws',
+ 'connection_id',
+ 'conv',
+ 'convert_cpu_id_mask',
+ 'convert_interval_to_user_interval',
+ 'convert_tz',
+ 'cos',
+ 'cot',
+ 'count',
+ 'crc32',
+ 'curdate',
+ 'current_role',
+ 'curtime',
+ 'date_add',
+ 'date_format',
+ 'date_sub',
+ 'datediff',
+ 'dayname',
+ 'dayofmonth',
+ 'dayofweek',
+ 'dayofyear',
+ 'degrees',
+ 'elt',
+ 'exp',
+ 'export_set',
+ 'extract',
+ 'extractvalue',
+ 'field',
+ 'find_in_set',
+ 'floor',
+ 'format_bytes',
+ 'format_pico_time',
+ 'found_rows',
+ 'from_base64',
+ 'from_days',
+ 'from_unixtime',
+ 'get_dd_column_privileges',
+ 'get_dd_create_options',
+ 'get_dd_index_private_data',
+ 'get_dd_index_sub_part_length',
+ 'get_dd_property_key_value',
+ 'get_dd_schema_options',
+ 'get_dd_tablespace_private_data',
+ 'get_lock',
+ 'greatest',
+ 'group_concat',
+ 'gtid_subset',
+ 'gtid_subtract',
+ 'hex',
+ 'icu_version',
+ 'ifnull',
+ 'inet6_aton',
+ 'inet6_ntoa',
+ 'inet_aton',
+ 'inet_ntoa',
+ 'instr',
+ 'internal_auto_increment',
+ 'internal_avg_row_length',
+ 'internal_check_time',
+ 'internal_checksum',
+ 'internal_data_free',
+ 'internal_data_length',
+ 'internal_dd_char_length',
+ 'internal_get_comment_or_error',
+ 'internal_get_dd_column_extra',
+ 'internal_get_enabled_role_json',
+ 'internal_get_hostname',
+ 'internal_get_mandatory_roles_json',
+ 'internal_get_partition_nodegroup',
+ 'internal_get_username',
+ 'internal_get_view_warning_or_error',
+ 'internal_index_column_cardinality',
+ 'internal_index_length',
+ 'internal_is_enabled_role',
+ 'internal_is_mandatory_role',
+ 'internal_keys_disabled',
+ 'internal_max_data_length',
+ 'internal_table_rows',
+ 'internal_tablespace_autoextend_size',
+ 'internal_tablespace_data_free',
+ 'internal_tablespace_extent_size',
+ 'internal_tablespace_extra',
+ 'internal_tablespace_free_extents',
+ 'internal_tablespace_id',
+ 'internal_tablespace_initial_size',
+ 'internal_tablespace_logfile_group_name',
+ 'internal_tablespace_logfile_group_number',
+ 'internal_tablespace_maximum_size',
+ 'internal_tablespace_row_format',
+ 'internal_tablespace_status',
+ 'internal_tablespace_total_extents',
+ 'internal_tablespace_type',
+ 'internal_tablespace_version',
+ 'internal_update_time',
+ 'is_free_lock',
+ 'is_ipv4',
+ 'is_ipv4_compat',
+ 'is_ipv4_mapped',
+ 'is_ipv6',
+ 'is_used_lock',
+ 'is_uuid',
+ 'is_visible_dd_object',
+ 'isnull',
+ 'json_array',
+ 'json_array_append',
+ 'json_array_insert',
+ 'json_arrayagg',
+ 'json_contains',
+ 'json_contains_path',
+ 'json_depth',
+ 'json_extract',
+ 'json_insert',
+ 'json_keys',
+ 'json_length',
+ 'json_merge',
+ 'json_merge_patch',
+ 'json_merge_preserve',
+ 'json_object',
+ 'json_objectagg',
+ 'json_overlaps',
+ 'json_pretty',
+ 'json_quote',
+ 'json_remove',
+ 'json_replace',
+ 'json_schema_valid',
+ 'json_schema_validation_report',
+ 'json_search',
+ 'json_set',
+ 'json_storage_free',
+ 'json_storage_size',
+ 'json_type',
+ 'json_unquote',
+ 'json_valid',
+ 'last_day',
+ 'last_insert_id',
+ 'lcase',
+ 'least',
+ 'length',
+ 'like_range_max',
+ 'like_range_min',
+ 'ln',
+ 'load_file',
+ 'locate',
+ 'log',
+ 'log10',
+ 'log2',
+ 'lower',
+ 'lpad',
+ 'ltrim',
+ 'make_set',
+ 'makedate',
+ 'maketime',
+ 'master_pos_wait',
+ 'max',
+ 'mbrcontains',
+ 'mbrcoveredby',
+ 'mbrcovers',
+ 'mbrdisjoint',
+ 'mbrequals',
+ 'mbrintersects',
+ 'mbroverlaps',
+ 'mbrtouches',
+ 'mbrwithin',
+ 'md5',
+ 'mid',
+ 'min',
+ 'monthname',
+ 'name_const',
+ 'now',
+ 'nullif',
+ 'oct',
+ 'octet_length',
+ 'ord',
+ 'period_add',
+ 'period_diff',
+ 'pi',
+ 'position',
+ 'pow',
+ 'power',
+ 'ps_current_thread_id',
+ 'ps_thread_id',
+ 'quote',
+ 'radians',
+ 'rand',
+ 'random_bytes',
+ 'regexp_instr',
+ 'regexp_like',
+ 'regexp_replace',
+ 'regexp_substr',
+ 'release_all_locks',
+ 'release_lock',
+ 'remove_dd_property_key',
+ 'reverse',
+ 'roles_graphml',
+ 'round',
+ 'rpad',
+ 'rtrim',
+ 'sec_to_time',
+ 'session_user',
+ 'sha',
+ 'sha1',
+ 'sha2',
+ 'sign',
+ 'sin',
+ 'sleep',
+ 'soundex',
+ 'source_pos_wait',
+ 'space',
+ 'sqrt',
+ 'st_area',
+ 'st_asbinary',
+ 'st_asgeojson',
+ 'st_astext',
+ 'st_aswkb',
+ 'st_aswkt',
+ 'st_buffer',
+ 'st_buffer_strategy',
+ 'st_centroid',
+ 'st_collect',
+ 'st_contains',
+ 'st_convexhull',
+ 'st_crosses',
+ 'st_difference',
+ 'st_dimension',
+ 'st_disjoint',
+ 'st_distance',
+ 'st_distance_sphere',
+ 'st_endpoint',
+ 'st_envelope',
+ 'st_equals',
+ 'st_exteriorring',
+ 'st_frechetdistance',
+ 'st_geohash',
+ 'st_geomcollfromtext',
+ 'st_geomcollfromtxt',
+ 'st_geomcollfromwkb',
+ 'st_geometrycollectionfromtext',
+ 'st_geometrycollectionfromwkb',
+ 'st_geometryfromtext',
+ 'st_geometryfromwkb',
+ 'st_geometryn',
+ 'st_geometrytype',
+ 'st_geomfromgeojson',
+ 'st_geomfromtext',
+ 'st_geomfromwkb',
+ 'st_hausdorffdistance',
+ 'st_interiorringn',
+ 'st_intersection',
+ 'st_intersects',
+ 'st_isclosed',
+ 'st_isempty',
+ 'st_issimple',
+ 'st_isvalid',
+ 'st_latfromgeohash',
+ 'st_latitude',
+ 'st_length',
+ 'st_linefromtext',
+ 'st_linefromwkb',
+ 'st_lineinterpolatepoint',
+ 'st_lineinterpolatepoints',
+ 'st_linestringfromtext',
+ 'st_linestringfromwkb',
+ 'st_longfromgeohash',
+ 'st_longitude',
+ 'st_makeenvelope',
+ 'st_mlinefromtext',
+ 'st_mlinefromwkb',
+ 'st_mpointfromtext',
+ 'st_mpointfromwkb',
+ 'st_mpolyfromtext',
+ 'st_mpolyfromwkb',
+ 'st_multilinestringfromtext',
+ 'st_multilinestringfromwkb',
+ 'st_multipointfromtext',
+ 'st_multipointfromwkb',
+ 'st_multipolygonfromtext',
+ 'st_multipolygonfromwkb',
+ 'st_numgeometries',
+ 'st_numinteriorring',
+ 'st_numinteriorrings',
+ 'st_numpoints',
+ 'st_overlaps',
+ 'st_pointatdistance',
+ 'st_pointfromgeohash',
+ 'st_pointfromtext',
+ 'st_pointfromwkb',
+ 'st_pointn',
+ 'st_polyfromtext',
+ 'st_polyfromwkb',
+ 'st_polygonfromtext',
+ 'st_polygonfromwkb',
+ 'st_simplify',
+ 'st_srid',
+ 'st_startpoint',
+ 'st_swapxy',
+ 'st_symdifference',
+ 'st_touches',
+ 'st_transform',
+ 'st_union',
+ 'st_validate',
+ 'st_within',
+ 'st_x',
+ 'st_y',
+ 'statement_digest',
+ 'statement_digest_text',
+ 'std',
+ 'stddev',
+ 'stddev_pop',
+ 'stddev_samp',
+ 'str_to_date',
+ 'strcmp',
+ 'subdate',
+ 'substr',
+ 'substring',
+ 'substring_index',
+ 'subtime',
+ 'sum',
+ 'sysdate',
+ 'system_user',
+ 'tan',
+ 'time_format',
+ 'time_to_sec',
+ 'timediff',
+ 'to_base64',
+ 'to_days',
+ 'to_seconds',
+ 'trim',
+ 'ucase',
+ 'uncompress',
+ 'uncompressed_length',
+ 'unhex',
+ 'unix_timestamp',
+ 'updatexml',
+ 'upper',
+ 'uuid',
+ 'uuid_short',
+ 'uuid_to_bin',
+ 'validate_password_strength',
+ 'var_pop',
+ 'var_samp',
+ 'variance',
+ 'version',
+ 'wait_for_executed_gtid_set',
+ 'wait_until_sql_thread_after_gtids',
+ 'weekday',
+ 'weekofyear',
+ 'yearweek',
+)
+
+
+MYSQL_OPTIMIZER_HINTS = (
+ 'bka',
+ 'bnl',
+ 'derived_condition_pushdown',
+ 'dupsweedout',
+ 'firstmatch',
+ 'group_index',
+ 'hash_join',
+ 'index',
+ 'index_merge',
+ 'intoexists',
+ 'join_fixed_order',
+ 'join_index',
+ 'join_order',
+ 'join_prefix',
+ 'join_suffix',
+ 'loosescan',
+ 'materialization',
+ 'max_execution_time',
+ 'merge',
+ 'mrr',
+ 'no_bka',
+ 'no_bnl',
+ 'no_derived_condition_pushdown',
+ 'no_group_index',
+ 'no_hash_join',
+ 'no_icp',
+ 'no_index',
+ 'no_index_merge',
+ 'no_join_index',
+ 'no_merge',
+ 'no_mrr',
+ 'no_order_index',
+ 'no_range_optimization',
+ 'no_semijoin',
+ 'no_skip_scan',
+ 'order_index',
+ 'qb_name',
+ 'resource_group',
+ 'semijoin',
+ 'set_var',
+ 'skip_scan',
+ 'subquery',
+)
+
+
+MYSQL_KEYWORDS = (
+ 'accessible',
+ 'account',
+ 'action',
+ 'active',
+ 'add',
+ 'admin',
+ 'after',
+ 'against',
+ 'aggregate',
+ 'algorithm',
+ 'all',
+ 'alter',
+ 'always',
+ 'analyze',
+ 'and',
+ 'any',
+ 'array',
+ 'as',
+ 'asc',
+ 'ascii',
+ 'asensitive',
+ 'assign_gtids_to_anonymous_transactions',
+ 'at',
+ 'attribute',
+ 'authentication',
+ 'auto_increment',
+ 'autoextend_size',
+ 'avg',
+ 'avg_row_length',
+ 'backup',
+ 'before',
+ 'begin',
+ 'between',
+ 'binlog',
+ 'block',
+ 'both',
+ 'btree',
+ 'buckets',
+ 'by',
+ 'byte',
+ 'cache',
+ 'call',
+ 'cascade',
+ 'cascaded',
+ 'case',
+ 'catalog_name',
+ 'chain',
+ 'challenge_response',
+ 'change',
+ 'changed',
+ 'channel',
+ 'character',
+ 'charset',
+ 'check',
+ 'checksum',
+ 'cipher',
+ 'class_origin',
+ 'client',
+ 'clone',
+ 'close',
+ 'coalesce',
+ 'code',
+ 'collate',
+ 'collation',
+ 'column',
+ 'column_format',
+ 'column_name',
+ 'columns',
+ 'comment',
+ 'commit',
+ 'committed',
+ 'compact',
+ 'completion',
+ 'component',
+ 'compressed',
+ 'compression',
+ 'concurrent',
+ 'condition',
+ 'connection',
+ 'consistent',
+ 'constraint',
+ 'constraint_catalog',
+ 'constraint_name',
+ 'constraint_schema',
+ 'contains',
+ 'context',
+ 'continue',
+ 'convert',
+ 'cpu',
+ 'create',
+ 'cross',
+ 'cube',
+ 'cume_dist',
+ 'current',
+ 'current_date',
+ 'current_time',
+ 'current_timestamp',
+ 'current_user',
+ 'cursor',
+ 'cursor_name',
+ 'data',
+ 'database',
+ 'databases',
+ 'datafile',
+ 'day',
+ 'day_hour',
+ 'day_microsecond',
+ 'day_minute',
+ 'day_second',
+ 'deallocate',
+ 'declare',
+ 'default',
+ 'default_auth',
+ 'definer',
+ 'definition',
+ 'delay_key_write',
+ 'delayed',
+ 'delete',
+ 'dense_rank',
+ 'desc',
+ 'describe',
+ 'description',
+ 'deterministic',
+ 'diagnostics',
+ 'directory',
+ 'disable',
+ 'discard',
+ 'disk',
+ 'distinct',
+ 'distinctrow',
+ 'div',
+ 'do',
+ 'drop',
+ 'dual',
+ 'dumpfile',
+ 'duplicate',
+ 'dynamic',
+ 'each',
+ 'else',
+ 'elseif',
+ 'empty',
+ 'enable',
+ 'enclosed',
+ 'encryption',
+ 'end',
+ 'ends',
+ 'enforced',
+ 'engine',
+ 'engine_attribute',
+ 'engines',
+ 'error',
+ 'errors',
+ 'escape',
+ 'escaped',
+ 'event',
+ 'events',
+ 'every',
+ 'except',
+ 'exchange',
+ 'exclude',
+ 'execute',
+ 'exists',
+ 'exit',
+ 'expansion',
+ 'expire',
+ 'explain',
+ 'export',
+ 'extended',
+ 'extent_size',
+ 'factor',
+ 'failed_login_attempts',
+ 'false',
+ 'fast',
+ 'faults',
+ 'fetch',
+ 'fields',
+ 'file',
+ 'file_block_size',
+ 'filter',
+ 'finish',
+ 'first',
+ 'first_value',
+ 'flush',
+ 'following',
+ 'follows',
+ 'for',
+ 'force',
+ 'foreign',
+ 'format',
+ 'found',
+ 'from',
+ 'full',
+ 'fulltext',
+ 'function',
+ 'general',
+ 'generated',
+ 'geomcollection',
+ 'get',
+ 'get_format',
+ 'get_master_public_key',
+ 'get_source_public_key',
+ 'global',
+ 'grant',
+ 'grants',
+ 'group',
+ 'group_replication',
+ 'grouping',
+ 'groups',
+ 'gtid_only',
+ 'handler',
+ 'hash',
+ 'having',
+ 'help',
+ 'high_priority',
+ 'histogram',
+ 'history',
+ 'host',
+ 'hosts',
+ 'hour',
+ 'hour_microsecond',
+ 'hour_minute',
+ 'hour_second',
+ 'identified',
+ 'if',
+ 'ignore',
+ 'ignore_server_ids',
+ 'import',
+ 'in',
+ 'inactive',
+ 'index',
+ 'indexes',
+ 'infile',
+ 'initial',
+ 'initial_size',
+ 'initiate',
+ 'inner',
+ 'inout',
+ 'insensitive',
+ 'insert',
+ 'insert_method',
+ 'install',
+ 'instance',
+ 'interval',
+ 'into',
+ 'invisible',
+ 'invoker',
+ 'io',
+ 'io_after_gtids',
+ 'io_before_gtids',
+ 'io_thread',
+ 'ipc',
+ 'is',
+ 'isolation',
+ 'issuer',
+ 'iterate',
+ 'join',
+ 'json_table',
+ 'json_value',
+ 'key',
+ 'key_block_size',
+ 'keyring',
+ 'keys',
+ 'kill',
+ 'lag',
+ 'language',
+ 'last',
+ 'last_value',
+ 'lateral',
+ 'lead',
+ 'leading',
+ 'leave',
+ 'leaves',
+ 'left',
+ 'less',
+ 'level',
+ 'like',
+ 'limit',
+ 'linear',
+ 'lines',
+ 'list',
+ 'load',
+ 'local',
+ 'localtime',
+ 'localtimestamp',
+ 'lock',
+ 'locked',
+ 'locks',
+ 'logfile',
+ 'logs',
+ 'loop',
+ 'low_priority',
+ 'master',
+ 'master_auto_position',
+ 'master_bind',
+ 'master_compression_algorithms',
+ 'master_connect_retry',
+ 'master_delay',
+ 'master_heartbeat_period',
+ 'master_host',
+ 'master_log_file',
+ 'master_log_pos',
+ 'master_password',
+ 'master_port',
+ 'master_public_key_path',
+ 'master_retry_count',
+ 'master_ssl',
+ 'master_ssl_ca',
+ 'master_ssl_capath',
+ 'master_ssl_cert',
+ 'master_ssl_cipher',
+ 'master_ssl_crl',
+ 'master_ssl_crlpath',
+ 'master_ssl_key',
+ 'master_ssl_verify_server_cert',
+ 'master_tls_ciphersuites',
+ 'master_tls_version',
+ 'master_user',
+ 'master_zstd_compression_level',
+ 'match',
+ 'max_connections_per_hour',
+ 'max_queries_per_hour',
+ 'max_rows',
+ 'max_size',
+ 'max_updates_per_hour',
+ 'max_user_connections',
+ 'maxvalue',
+ 'medium',
+ 'member',
+ 'memory',
+ 'merge',
+ 'message_text',
+ 'microsecond',
+ 'migrate',
+ 'min_rows',
+ 'minute',
+ 'minute_microsecond',
+ 'minute_second',
+ 'mod',
+ 'mode',
+ 'modifies',
+ 'modify',
+ 'month',
+ 'mutex',
+ 'mysql_errno',
+ 'name',
+ 'names',
+ 'natural',
+ 'ndb',
+ 'ndbcluster',
+ 'nested',
+ 'network_namespace',
+ 'never',
+ 'new',
+ 'next',
+ 'no',
+ 'no_wait',
+ 'no_write_to_binlog',
+ 'nodegroup',
+ 'none',
+ 'not',
+ 'nowait',
+ 'nth_value',
+ 'ntile',
+ 'null',
+ 'nulls',
+ 'number',
+ 'of',
+ 'off',
+ 'offset',
+ 'oj',
+ 'old',
+ 'on',
+ 'one',
+ 'only',
+ 'open',
+ 'optimize',
+ 'optimizer_costs',
+ 'option',
+ 'optional',
+ 'optionally',
+ 'options',
+ 'or',
+ 'order',
+ 'ordinality',
+ 'organization',
+ 'others',
+ 'out',
+ 'outer',
+ 'outfile',
+ 'over',
+ 'owner',
+ 'pack_keys',
+ 'page',
+ 'parser',
+ 'partial',
+ 'partition',
+ 'partitioning',
+ 'partitions',
+ 'password',
+ 'password_lock_time',
+ 'path',
+ 'percent_rank',
+ 'persist',
+ 'persist_only',
+ 'phase',
+ 'plugin',
+ 'plugin_dir',
+ 'plugins',
+ 'port',
+ 'precedes',
+ 'preceding',
+ 'prepare',
+ 'preserve',
+ 'prev',
+ 'primary',
+ 'privilege_checks_user',
+ 'privileges',
+ 'procedure',
+ 'process',
+ 'processlist',
+ 'profile',
+ 'profiles',
+ 'proxy',
+ 'purge',
+ 'quarter',
+ 'query',
+ 'quick',
+ 'random',
+ 'range',
+ 'rank',
+ 'read',
+ 'read_only',
+ 'read_write',
+ 'reads',
+ 'rebuild',
+ 'recover',
+ 'recursive',
+ 'redo_buffer_size',
+ 'redundant',
+ 'reference',
+ 'references',
+ 'regexp',
+ 'registration',
+ 'relay',
+ 'relay_log_file',
+ 'relay_log_pos',
+ 'relay_thread',
+ 'relaylog',
+ 'release',
+ 'reload',
+ 'remove',
+ 'rename',
+ 'reorganize',
+ 'repair',
+ 'repeat',
+ 'repeatable',
+ 'replace',
+ 'replica',
+ 'replicas',
+ 'replicate_do_db',
+ 'replicate_do_table',
+ 'replicate_ignore_db',
+ 'replicate_ignore_table',
+ 'replicate_rewrite_db',
+ 'replicate_wild_do_table',
+ 'replicate_wild_ignore_table',
+ 'replication',
+ 'require',
+ 'require_row_format',
+ 'require_table_primary_key_check',
+ 'reset',
+ 'resignal',
+ 'resource',
+ 'respect',
+ 'restart',
+ 'restore',
+ 'restrict',
+ 'resume',
+ 'retain',
+ 'return',
+ 'returned_sqlstate',
+ 'returning',
+ 'returns',
+ 'reuse',
+ 'reverse',
+ 'revoke',
+ 'right',
+ 'rlike',
+ 'role',
+ 'rollback',
+ 'rollup',
+ 'rotate',
+ 'routine',
+ 'row',
+ 'row_count',
+ 'row_format',
+ 'row_number',
+ 'rows',
+ 'rtree',
+ 'savepoint',
+ 'schedule',
+ 'schema',
+ 'schema_name',
+ 'schemas',
+ 'second',
+ 'second_microsecond',
+ 'secondary',
+ 'secondary_engine',
+ 'secondary_engine_attribute',
+ 'secondary_load',
+ 'secondary_unload',
+ 'security',
+ 'select',
+ 'sensitive',
+ 'separator',
+ 'serializable',
+ 'server',
+ 'session',
+ 'share',
+ 'show',
+ 'shutdown',
+ 'signal',
+ 'signed',
+ 'simple',
+ 'skip',
+ 'slave',
+ 'slow',
+ 'snapshot',
+ 'socket',
+ 'some',
+ 'soname',
+ 'sounds',
+ 'source',
+ 'source_auto_position',
+ 'source_bind',
+ 'source_compression_algorithms',
+ 'source_connect_retry',
+ 'source_connection_auto_failover',
+ 'source_delay',
+ 'source_heartbeat_period',
+ 'source_host',
+ 'source_log_file',
+ 'source_log_pos',
+ 'source_password',
+ 'source_port',
+ 'source_public_key_path',
+ 'source_retry_count',
+ 'source_ssl',
+ 'source_ssl_ca',
+ 'source_ssl_capath',
+ 'source_ssl_cert',
+ 'source_ssl_cipher',
+ 'source_ssl_crl',
+ 'source_ssl_crlpath',
+ 'source_ssl_key',
+ 'source_ssl_verify_server_cert',
+ 'source_tls_ciphersuites',
+ 'source_tls_version',
+ 'source_user',
+ 'source_zstd_compression_level',
+ 'spatial',
+ 'specific',
+ 'sql',
+ 'sql_after_gtids',
+ 'sql_after_mts_gaps',
+ 'sql_before_gtids',
+ 'sql_big_result',
+ 'sql_buffer_result',
+ 'sql_calc_found_rows',
+ 'sql_no_cache',
+ 'sql_small_result',
+ 'sql_thread',
+ 'sql_tsi_day',
+ 'sql_tsi_hour',
+ 'sql_tsi_minute',
+ 'sql_tsi_month',
+ 'sql_tsi_quarter',
+ 'sql_tsi_second',
+ 'sql_tsi_week',
+ 'sql_tsi_year',
+ 'sqlexception',
+ 'sqlstate',
+ 'sqlwarning',
+ 'srid',
+ 'ssl',
+ 'stacked',
+ 'start',
+ 'starting',
+ 'starts',
+ 'stats_auto_recalc',
+ 'stats_persistent',
+ 'stats_sample_pages',
+ 'status',
+ 'stop',
+ 'storage',
+ 'stored',
+ 'straight_join',
+ 'stream',
+ 'string',
+ 'subclass_origin',
+ 'subject',
+ 'subpartition',
+ 'subpartitions',
+ 'super',
+ 'suspend',
+ 'swaps',
+ 'switches',
+ 'system',
+ 'table',
+ 'table_checksum',
+ 'table_name',
+ 'tables',
+ 'tablespace',
+ 'temporary',
+ 'temptable',
+ 'terminated',
+ 'than',
+ 'then',
+ 'thread_priority',
+ 'ties',
+ 'timestampadd',
+ 'timestampdiff',
+ 'tls',
+ 'to',
+ 'trailing',
+ 'transaction',
+ 'trigger',
+ 'triggers',
+ 'true',
+ 'truncate',
+ 'type',
+ 'types',
+ 'unbounded',
+ 'uncommitted',
+ 'undefined',
+ 'undo',
+ 'undo_buffer_size',
+ 'undofile',
+ 'unicode',
+ 'uninstall',
+ 'union',
+ 'unique',
+ 'unknown',
+ 'unlock',
+ 'unregister',
+ 'unsigned',
+ 'until',
+ 'update',
+ 'upgrade',
+ 'usage',
+ 'use',
+ 'use_frm',
+ 'user',
+ 'user_resources',
+ 'using',
+ 'utc_date',
+ 'utc_time',
+ 'utc_timestamp',
+ 'validation',
+ 'value',
+ 'values',
+ 'variables',
+ 'vcpu',
+ 'view',
+ 'virtual',
+ 'visible',
+ 'wait',
+ 'warnings',
+ 'week',
+ 'weight_string',
+ 'when',
+ 'where',
+ 'while',
+ 'window',
+ 'with',
+ 'without',
+ 'work',
+ 'wrapper',
+ 'write',
+ 'x509',
+ 'xa',
+ 'xid',
+ 'xml',
+ 'xor',
+ 'year_month',
+ 'zerofill',
+ 'zone',
+)
+
+
+if __name__ == '__main__': # pragma: no cover
+ import re
+ from urllib.request import urlopen
+
+ from pygments.util import format_lines
+
+ # MySQL source code
+ SOURCE_URL = 'https://github.com/mysql/mysql-server/raw/8.0'
+ LEX_URL = SOURCE_URL + '/sql/lex.h'
+ ITEM_CREATE_URL = SOURCE_URL + '/sql/item_create.cc'
+
+
+ def update_myself():
+ # Pull content from lex.h.
+ lex_file = urlopen(LEX_URL).read().decode('utf8', errors='ignore')
+ keywords = parse_lex_keywords(lex_file)
+ functions = parse_lex_functions(lex_file)
+ optimizer_hints = parse_lex_optimizer_hints(lex_file)
+
+ # Parse content in item_create.cc.
+ item_create_file = urlopen(ITEM_CREATE_URL).read().decode('utf8', errors='ignore')
+ functions.update(parse_item_create_functions(item_create_file))
+
+ # Remove data types from the set of keywords.
+ keywords -= set(MYSQL_DATATYPES)
+
+ update_content('MYSQL_FUNCTIONS', tuple(sorted(functions)))
+ update_content('MYSQL_KEYWORDS', tuple(sorted(keywords)))
+ update_content('MYSQL_OPTIMIZER_HINTS', tuple(sorted(optimizer_hints)))
+
+
+ def parse_lex_keywords(f):
+ """Parse keywords in lex.h."""
+
+ results = set()
+ for m in re.finditer(r'{SYM(?:_HK)?\("(?P<keyword>[a-z0-9_]+)",', f, flags=re.I):
+ results.add(m.group('keyword').lower())
+
+ if not results:
+ raise ValueError('No keywords found')
+
+ return results
+
+
+ def parse_lex_optimizer_hints(f):
+ """Parse optimizer hints in lex.h."""
+
+ results = set()
+ for m in re.finditer(r'{SYM_H\("(?P<keyword>[a-z0-9_]+)",', f, flags=re.I):
+ results.add(m.group('keyword').lower())
+
+ if not results:
+ raise ValueError('No optimizer hints found')
+
+ return results
+
+
+ def parse_lex_functions(f):
+ """Parse MySQL function names from lex.h."""
+
+ results = set()
+ for m in re.finditer(r'{SYM_FN?\("(?P<function>[a-z0-9_]+)",', f, flags=re.I):
+ results.add(m.group('function').lower())
+
+ if not results:
+ raise ValueError('No lex functions found')
+
+ return results
+
+
+ def parse_item_create_functions(f):
+ """Parse MySQL function names from item_create.cc."""
+
+ results = set()
+ for m in re.finditer(r'{"(?P<function>[^"]+?)",\s*SQL_F[^(]+?\(', f, flags=re.I):
+ results.add(m.group('function').lower())
+
+ if not results:
+ raise ValueError('No item_create functions found')
+
+ return results
+
+
+ def update_content(field_name, content):
+ """Overwrite this file with content parsed from MySQL's source code."""
+
+ with open(__file__) as f:
+ data = f.read()
+
+ # Line to start/end inserting
+ re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % field_name, re.M | re.S)
+ m = re_match.search(data)
+ if not m:
+ raise ValueError('Could not find an existing definition for %s' % field_name)
+
+ new_block = format_lines(field_name, content)
+ data = data[:m.start()] + new_block + data[m.end():]
+
+ with open(__file__, 'w', newline='\n') as f:
+ f.write(data)
+
+ update_myself()
diff --git a/pygments/lexers/_openedge_builtins.py b/pygments/lexers/_openedge_builtins.py
new file mode 100644
index 0000000..dee62a9
--- /dev/null
+++ b/pygments/lexers/_openedge_builtins.py
@@ -0,0 +1,2600 @@
+"""
+ pygments.lexers._openedge_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builtin list for the OpenEdgeLexer.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+OPENEDGEKEYWORDS = (
+ 'ABS',
+ 'ABSO',
+ 'ABSOL',
+ 'ABSOLU',
+ 'ABSOLUT',
+ 'ABSOLUTE',
+ 'ABSTRACT',
+ 'ACCELERATOR',
+ 'ACCUM',
+ 'ACCUMU',
+ 'ACCUMUL',
+ 'ACCUMULA',
+ 'ACCUMULAT',
+ 'ACCUMULATE',
+ 'ACTIVE-FORM',
+ 'ACTIVE-WINDOW',
+ 'ADD',
+ 'ADD-BUFFER',
+ 'ADD-CALC-COLUMN',
+ 'ADD-COLUMNS-FROM',
+ 'ADD-EVENTS-PROCEDURE',
+ 'ADD-FIELDS-FROM',
+ 'ADD-FIRST',
+ 'ADD-INDEX-FIELD',
+ 'ADD-LAST',
+ 'ADD-LIKE-COLUMN',
+ 'ADD-LIKE-FIELD',
+ 'ADD-LIKE-INDEX',
+ 'ADD-NEW-FIELD',
+ 'ADD-NEW-INDEX',
+ 'ADD-SCHEMA-LOCATION',
+ 'ADD-SUPER-PROCEDURE',
+ 'ADM-DATA',
+ 'ADVISE',
+ 'ALERT-BOX',
+ 'ALIAS',
+ 'ALL',
+ 'ALLOW-COLUMN-SEARCHING',
+ 'ALLOW-REPLICATION',
+ 'ALTER',
+ 'ALWAYS-ON-TOP',
+ 'AMBIG',
+ 'AMBIGU',
+ 'AMBIGUO',
+ 'AMBIGUOU',
+ 'AMBIGUOUS',
+ 'ANALYZ',
+ 'ANALYZE',
+ 'AND',
+ 'ANSI-ONLY',
+ 'ANY',
+ 'ANYWHERE',
+ 'APPEND',
+ 'APPL-ALERT',
+ 'APPL-ALERT-',
+ 'APPL-ALERT-B',
+ 'APPL-ALERT-BO',
+ 'APPL-ALERT-BOX',
+ 'APPL-ALERT-BOXE',
+ 'APPL-ALERT-BOXES',
+ 'APPL-CONTEXT-ID',
+ 'APPLICATION',
+ 'APPLY',
+ 'APPSERVER-INFO',
+ 'APPSERVER-PASSWORD',
+ 'APPSERVER-USERID',
+ 'ARRAY-MESSAGE',
+ 'AS',
+ 'ASC',
+ 'ASCE',
+ 'ASCEN',
+ 'ASCEND',
+ 'ASCENDI',
+ 'ASCENDIN',
+ 'ASCENDING',
+ 'ASK-OVERWRITE',
+ 'ASSEMBLY',
+ 'ASSIGN',
+ 'ASYNC-REQUEST-COUNT',
+ 'ASYNC-REQUEST-HANDLE',
+ 'ASYNCHRONOUS',
+ 'AT',
+ 'ATTACHED-PAIRLIST',
+ 'ATTR',
+ 'ATTR-SPACE',
+ 'ATTRI',
+ 'ATTRIB',
+ 'ATTRIBU',
+ 'ATTRIBUT',
+ 'AUDIT-CONTROL',
+ 'AUDIT-ENABLED',
+ 'AUDIT-EVENT-CONTEXT',
+ 'AUDIT-POLICY',
+ 'AUTHENTICATION-FAILED',
+ 'AUTHORIZATION',
+ 'AUTO-COMP',
+ 'AUTO-COMPL',
+ 'AUTO-COMPLE',
+ 'AUTO-COMPLET',
+ 'AUTO-COMPLETI',
+ 'AUTO-COMPLETIO',
+ 'AUTO-COMPLETION',
+ 'AUTO-END-KEY',
+ 'AUTO-ENDKEY',
+ 'AUTO-GO',
+ 'AUTO-IND',
+ 'AUTO-INDE',
+ 'AUTO-INDEN',
+ 'AUTO-INDENT',
+ 'AUTO-RESIZE',
+ 'AUTO-RET',
+ 'AUTO-RETU',
+ 'AUTO-RETUR',
+ 'AUTO-RETURN',
+ 'AUTO-SYNCHRONIZE',
+ 'AUTO-Z',
+ 'AUTO-ZA',
+ 'AUTO-ZAP',
+ 'AUTOMATIC',
+ 'AVAIL',
+ 'AVAILA',
+ 'AVAILAB',
+ 'AVAILABL',
+ 'AVAILABLE',
+ 'AVAILABLE-FORMATS',
+ 'AVE',
+ 'AVER',
+ 'AVERA',
+ 'AVERAG',
+ 'AVERAGE',
+ 'AVG',
+ 'BACK',
+ 'BACKG',
+ 'BACKGR',
+ 'BACKGRO',
+ 'BACKGROU',
+ 'BACKGROUN',
+ 'BACKGROUND',
+ 'BACKWARD',
+ 'BACKWARDS',
+ 'BASE64-DECODE',
+ 'BASE64-ENCODE',
+ 'BASE-ADE',
+ 'BASE-KEY',
+ 'BATCH',
+ 'BATCH-',
+ 'BATCH-M',
+ 'BATCH-MO',
+ 'BATCH-MOD',
+ 'BATCH-MODE',
+ 'BATCH-SIZE',
+ 'BEFORE-H',
+ 'BEFORE-HI',
+ 'BEFORE-HID',
+ 'BEFORE-HIDE',
+ 'BEGIN-EVENT-GROUP',
+ 'BEGINS',
+ 'BELL',
+ 'BETWEEN',
+ 'BGC',
+ 'BGCO',
+ 'BGCOL',
+ 'BGCOLO',
+ 'BGCOLOR',
+ 'BIG-ENDIAN',
+ 'BINARY',
+ 'BIND',
+ 'BIND-WHERE',
+ 'BLANK',
+ 'BLOCK-ITERATION-DISPLAY',
+ 'BLOCK-LEVEL',
+ 'BORDER-B',
+ 'BORDER-BO',
+ 'BORDER-BOT',
+ 'BORDER-BOTT',
+ 'BORDER-BOTTO',
+ 'BORDER-BOTTOM-CHARS',
+ 'BORDER-BOTTOM-P',
+ 'BORDER-BOTTOM-PI',
+ 'BORDER-BOTTOM-PIX',
+ 'BORDER-BOTTOM-PIXE',
+ 'BORDER-BOTTOM-PIXEL',
+ 'BORDER-BOTTOM-PIXELS',
+ 'BORDER-L',
+ 'BORDER-LE',
+ 'BORDER-LEF',
+ 'BORDER-LEFT',
+ 'BORDER-LEFT-',
+ 'BORDER-LEFT-C',
+ 'BORDER-LEFT-CH',
+ 'BORDER-LEFT-CHA',
+ 'BORDER-LEFT-CHAR',
+ 'BORDER-LEFT-CHARS',
+ 'BORDER-LEFT-P',
+ 'BORDER-LEFT-PI',
+ 'BORDER-LEFT-PIX',
+ 'BORDER-LEFT-PIXE',
+ 'BORDER-LEFT-PIXEL',
+ 'BORDER-LEFT-PIXELS',
+ 'BORDER-R',
+ 'BORDER-RI',
+ 'BORDER-RIG',
+ 'BORDER-RIGH',
+ 'BORDER-RIGHT',
+ 'BORDER-RIGHT-',
+ 'BORDER-RIGHT-C',
+ 'BORDER-RIGHT-CH',
+ 'BORDER-RIGHT-CHA',
+ 'BORDER-RIGHT-CHAR',
+ 'BORDER-RIGHT-CHARS',
+ 'BORDER-RIGHT-P',
+ 'BORDER-RIGHT-PI',
+ 'BORDER-RIGHT-PIX',
+ 'BORDER-RIGHT-PIXE',
+ 'BORDER-RIGHT-PIXEL',
+ 'BORDER-RIGHT-PIXELS',
+ 'BORDER-T',
+ 'BORDER-TO',
+ 'BORDER-TOP',
+ 'BORDER-TOP-',
+ 'BORDER-TOP-C',
+ 'BORDER-TOP-CH',
+ 'BORDER-TOP-CHA',
+ 'BORDER-TOP-CHAR',
+ 'BORDER-TOP-CHARS',
+ 'BORDER-TOP-P',
+ 'BORDER-TOP-PI',
+ 'BORDER-TOP-PIX',
+ 'BORDER-TOP-PIXE',
+ 'BORDER-TOP-PIXEL',
+ 'BORDER-TOP-PIXELS',
+ 'BOX',
+ 'BOX-SELECT',
+ 'BOX-SELECTA',
+ 'BOX-SELECTAB',
+ 'BOX-SELECTABL',
+ 'BOX-SELECTABLE',
+ 'BREAK',
+ 'BROWSE',
+ 'BUFFER',
+ 'BUFFER-CHARS',
+ 'BUFFER-COMPARE',
+ 'BUFFER-COPY',
+ 'BUFFER-CREATE',
+ 'BUFFER-DELETE',
+ 'BUFFER-FIELD',
+ 'BUFFER-HANDLE',
+ 'BUFFER-LINES',
+ 'BUFFER-NAME',
+ 'BUFFER-PARTITION-ID',
+ 'BUFFER-RELEASE',
+ 'BUFFER-VALUE',
+ 'BUTTON',
+ 'BUTTONS',
+ 'BY',
+ 'BY-POINTER',
+ 'BY-VARIANT-POINTER',
+ 'CACHE',
+ 'CACHE-SIZE',
+ 'CALL',
+ 'CALL-NAME',
+ 'CALL-TYPE',
+ 'CAN-CREATE',
+ 'CAN-DELETE',
+ 'CAN-DO',
+ 'CAN-DO-DOMAIN-SUPPORT',
+ 'CAN-FIND',
+ 'CAN-QUERY',
+ 'CAN-READ',
+ 'CAN-SET',
+ 'CAN-WRITE',
+ 'CANCEL-BREAK',
+ 'CANCEL-BUTTON',
+ 'CAPS',
+ 'CAREFUL-PAINT',
+ 'CASE',
+ 'CASE-SEN',
+ 'CASE-SENS',
+ 'CASE-SENSI',
+ 'CASE-SENSIT',
+ 'CASE-SENSITI',
+ 'CASE-SENSITIV',
+ 'CASE-SENSITIVE',
+ 'CAST',
+ 'CATCH',
+ 'CDECL',
+ 'CENTER',
+ 'CENTERE',
+ 'CENTERED',
+ 'CHAINED',
+ 'CHARACTER',
+ 'CHARACTER_LENGTH',
+ 'CHARSET',
+ 'CHECK',
+ 'CHECKED',
+ 'CHOOSE',
+ 'CHR',
+ 'CLASS',
+ 'CLASS-TYPE',
+ 'CLEAR',
+ 'CLEAR-APPL-CONTEXT',
+ 'CLEAR-LOG',
+ 'CLEAR-SELECT',
+ 'CLEAR-SELECTI',
+ 'CLEAR-SELECTIO',
+ 'CLEAR-SELECTION',
+ 'CLEAR-SORT-ARROW',
+ 'CLEAR-SORT-ARROWS',
+ 'CLIENT-CONNECTION-ID',
+ 'CLIENT-PRINCIPAL',
+ 'CLIENT-TTY',
+ 'CLIENT-TYPE',
+ 'CLIENT-WORKSTATION',
+ 'CLIPBOARD',
+ 'CLOSE',
+ 'CLOSE-LOG',
+ 'CODE',
+ 'CODEBASE-LOCATOR',
+ 'CODEPAGE',
+ 'CODEPAGE-CONVERT',
+ 'COL',
+ 'COL-OF',
+ 'COLLATE',
+ 'COLON',
+ 'COLON-ALIGN',
+ 'COLON-ALIGNE',
+ 'COLON-ALIGNED',
+ 'COLOR',
+ 'COLOR-TABLE',
+ 'COLU',
+ 'COLUM',
+ 'COLUMN',
+ 'COLUMN-BGCOLOR',
+ 'COLUMN-DCOLOR',
+ 'COLUMN-FGCOLOR',
+ 'COLUMN-FONT',
+ 'COLUMN-LAB',
+ 'COLUMN-LABE',
+ 'COLUMN-LABEL',
+ 'COLUMN-MOVABLE',
+ 'COLUMN-OF',
+ 'COLUMN-PFCOLOR',
+ 'COLUMN-READ-ONLY',
+ 'COLUMN-RESIZABLE',
+ 'COLUMN-SCROLLING',
+ 'COLUMNS',
+ 'COM-HANDLE',
+ 'COM-SELF',
+ 'COMBO-BOX',
+ 'COMMAND',
+ 'COMPARES',
+ 'COMPILE',
+ 'COMPILER',
+ 'COMPLETE',
+ 'CONFIG-NAME',
+ 'CONNECT',
+ 'CONNECTED',
+ 'CONSTRUCTOR',
+ 'CONTAINS',
+ 'CONTENTS',
+ 'CONTEXT',
+ 'CONTEXT-HELP',
+ 'CONTEXT-HELP-FILE',
+ 'CONTEXT-HELP-ID',
+ 'CONTEXT-POPUP',
+ 'CONTROL',
+ 'CONTROL-BOX',
+ 'CONTROL-FRAME',
+ 'CONVERT',
+ 'CONVERT-3D-COLORS',
+ 'CONVERT-TO-OFFS',
+ 'CONVERT-TO-OFFSE',
+ 'CONVERT-TO-OFFSET',
+ 'COPY-DATASET',
+ 'COPY-LOB',
+ 'COPY-SAX-ATTRIBUTES',
+ 'COPY-TEMP-TABLE',
+ 'COUNT',
+ 'COUNT-OF',
+ 'CPCASE',
+ 'CPCOLL',
+ 'CPINTERNAL',
+ 'CPLOG',
+ 'CPPRINT',
+ 'CPRCODEIN',
+ 'CPRCODEOUT',
+ 'CPSTREAM',
+ 'CPTERM',
+ 'CRC-VALUE',
+ 'CREATE',
+ 'CREATE-LIKE',
+ 'CREATE-LIKE-SEQUENTIAL',
+ 'CREATE-NODE-NAMESPACE',
+ 'CREATE-RESULT-LIST-ENTRY',
+ 'CREATE-TEST-FILE',
+ 'CURRENT',
+ 'CURRENT-CHANGED',
+ 'CURRENT-COLUMN',
+ 'CURRENT-ENV',
+ 'CURRENT-ENVI',
+ 'CURRENT-ENVIR',
+ 'CURRENT-ENVIRO',
+ 'CURRENT-ENVIRON',
+ 'CURRENT-ENVIRONM',
+ 'CURRENT-ENVIRONME',
+ 'CURRENT-ENVIRONMEN',
+ 'CURRENT-ENVIRONMENT',
+ 'CURRENT-ITERATION',
+ 'CURRENT-LANG',
+ 'CURRENT-LANGU',
+ 'CURRENT-LANGUA',
+ 'CURRENT-LANGUAG',
+ 'CURRENT-LANGUAGE',
+ 'CURRENT-QUERY',
+ 'CURRENT-REQUEST-INFO',
+ 'CURRENT-RESPONSE-INFO',
+ 'CURRENT-RESULT-ROW',
+ 'CURRENT-ROW-MODIFIED',
+ 'CURRENT-VALUE',
+ 'CURRENT-WINDOW',
+ 'CURRENT_DATE',
+ 'CURS',
+ 'CURSO',
+ 'CURSOR',
+ 'CURSOR-CHAR',
+ 'CURSOR-LINE',
+ 'CURSOR-OFFSET',
+ 'DATA-BIND',
+ 'DATA-ENTRY-RET',
+ 'DATA-ENTRY-RETU',
+ 'DATA-ENTRY-RETUR',
+ 'DATA-ENTRY-RETURN',
+ 'DATA-REL',
+ 'DATA-RELA',
+ 'DATA-RELAT',
+ 'DATA-RELATI',
+ 'DATA-RELATIO',
+ 'DATA-RELATION',
+ 'DATA-SOURCE',
+ 'DATA-SOURCE-COMPLETE-MAP',
+ 'DATA-SOURCE-MODIFIED',
+ 'DATA-SOURCE-ROWID',
+ 'DATA-T',
+ 'DATA-TY',
+ 'DATA-TYP',
+ 'DATA-TYPE',
+ 'DATABASE',
+ 'DATASERVERS',
+ 'DATASET',
+ 'DATASET-HANDLE',
+ 'DATE',
+ 'DATE-F',
+ 'DATE-FO',
+ 'DATE-FOR',
+ 'DATE-FORM',
+ 'DATE-FORMA',
+ 'DATE-FORMAT',
+ 'DAY',
+ 'DB-CONTEXT',
+ 'DB-REFERENCES',
+ 'DBCODEPAGE',
+ 'DBCOLLATION',
+ 'DBNAME',
+ 'DBPARAM',
+ 'DBREST',
+ 'DBRESTR',
+ 'DBRESTRI',
+ 'DBRESTRIC',
+ 'DBRESTRICT',
+ 'DBRESTRICTI',
+ 'DBRESTRICTIO',
+ 'DBRESTRICTION',
+ 'DBRESTRICTIONS',
+ 'DBTASKID',
+ 'DBTYPE',
+ 'DBVERS',
+ 'DBVERSI',
+ 'DBVERSIO',
+ 'DBVERSION',
+ 'DCOLOR',
+ 'DDE',
+ 'DDE-ERROR',
+ 'DDE-I',
+ 'DDE-ID',
+ 'DDE-ITEM',
+ 'DDE-NAME',
+ 'DDE-TOPIC',
+ 'DEBLANK',
+ 'DEBU',
+ 'DEBUG',
+ 'DEBUG-ALERT',
+ 'DEBUG-LIST',
+ 'DEBUGGER',
+ 'DECIMAL',
+ 'DECIMALS',
+ 'DECLARE',
+ 'DECLARE-NAMESPACE',
+ 'DECRYPT',
+ 'DEFAULT',
+ 'DEFAULT-B',
+ 'DEFAULT-BU',
+ 'DEFAULT-BUFFER-HANDLE',
+ 'DEFAULT-BUT',
+ 'DEFAULT-BUTT',
+ 'DEFAULT-BUTTO',
+ 'DEFAULT-BUTTON',
+ 'DEFAULT-COMMIT',
+ 'DEFAULT-EX',
+ 'DEFAULT-EXT',
+ 'DEFAULT-EXTE',
+ 'DEFAULT-EXTEN',
+ 'DEFAULT-EXTENS',
+ 'DEFAULT-EXTENSI',
+ 'DEFAULT-EXTENSIO',
+ 'DEFAULT-EXTENSION',
+ 'DEFAULT-NOXL',
+ 'DEFAULT-NOXLA',
+ 'DEFAULT-NOXLAT',
+ 'DEFAULT-NOXLATE',
+ 'DEFAULT-VALUE',
+ 'DEFAULT-WINDOW',
+ 'DEFINE',
+ 'DEFINE-USER-EVENT-MANAGER',
+ 'DEFINED',
+ 'DEL',
+ 'DELE',
+ 'DELEGATE',
+ 'DELET',
+ 'DELETE PROCEDURE',
+ 'DELETE',
+ 'DELETE-CHAR',
+ 'DELETE-CHARA',
+ 'DELETE-CHARAC',
+ 'DELETE-CHARACT',
+ 'DELETE-CHARACTE',
+ 'DELETE-CHARACTER',
+ 'DELETE-CURRENT-ROW',
+ 'DELETE-LINE',
+ 'DELETE-RESULT-LIST-ENTRY',
+ 'DELETE-SELECTED-ROW',
+ 'DELETE-SELECTED-ROWS',
+ 'DELIMITER',
+ 'DESC',
+ 'DESCE',
+ 'DESCEN',
+ 'DESCEND',
+ 'DESCENDI',
+ 'DESCENDIN',
+ 'DESCENDING',
+ 'DESELECT-FOCUSED-ROW',
+ 'DESELECT-ROWS',
+ 'DESELECT-SELECTED-ROW',
+ 'DESELECTION',
+ 'DESTRUCTOR',
+ 'DIALOG-BOX',
+ 'DICT',
+ 'DICTI',
+ 'DICTIO',
+ 'DICTION',
+ 'DICTIONA',
+ 'DICTIONAR',
+ 'DICTIONARY',
+ 'DIR',
+ 'DISABLE',
+ 'DISABLE-AUTO-ZAP',
+ 'DISABLE-DUMP-TRIGGERS',
+ 'DISABLE-LOAD-TRIGGERS',
+ 'DISABLED',
+ 'DISCON',
+ 'DISCONN',
+ 'DISCONNE',
+ 'DISCONNEC',
+ 'DISCONNECT',
+ 'DISP',
+ 'DISPL',
+ 'DISPLA',
+ 'DISPLAY',
+ 'DISPLAY-MESSAGE',
+ 'DISPLAY-T',
+ 'DISPLAY-TY',
+ 'DISPLAY-TYP',
+ 'DISPLAY-TYPE',
+ 'DISTINCT',
+ 'DO',
+ 'DOMAIN-DESCRIPTION',
+ 'DOMAIN-NAME',
+ 'DOMAIN-TYPE',
+ 'DOS',
+ 'DOUBLE',
+ 'DOWN',
+ 'DRAG-ENABLED',
+ 'DROP',
+ 'DROP-DOWN',
+ 'DROP-DOWN-LIST',
+ 'DROP-FILE-NOTIFY',
+ 'DROP-TARGET',
+ 'DS-CLOSE-CURSOR',
+ 'DSLOG-MANAGER',
+ 'DUMP',
+ 'DYNAMIC',
+ 'DYNAMIC-ENUM',
+ 'DYNAMIC-FUNCTION',
+ 'DYNAMIC-INVOKE',
+ 'EACH',
+ 'ECHO',
+ 'EDGE',
+ 'EDGE-',
+ 'EDGE-C',
+ 'EDGE-CH',
+ 'EDGE-CHA',
+ 'EDGE-CHAR',
+ 'EDGE-CHARS',
+ 'EDGE-P',
+ 'EDGE-PI',
+ 'EDGE-PIX',
+ 'EDGE-PIXE',
+ 'EDGE-PIXEL',
+ 'EDGE-PIXELS',
+ 'EDIT-CAN-PASTE',
+ 'EDIT-CAN-UNDO',
+ 'EDIT-CLEAR',
+ 'EDIT-COPY',
+ 'EDIT-CUT',
+ 'EDIT-PASTE',
+ 'EDIT-UNDO',
+ 'EDITING',
+ 'EDITOR',
+ 'ELSE',
+ 'EMPTY',
+ 'EMPTY-TEMP-TABLE',
+ 'ENABLE',
+ 'ENABLED-FIELDS',
+ 'ENCODE',
+ 'ENCRYPT',
+ 'ENCRYPT-AUDIT-MAC-KEY',
+ 'ENCRYPTION-SALT',
+ 'END',
+ 'END-DOCUMENT',
+ 'END-ELEMENT',
+ 'END-EVENT-GROUP',
+ 'END-FILE-DROP',
+ 'END-KEY',
+ 'END-MOVE',
+ 'END-RESIZE',
+ 'END-ROW-RESIZE',
+ 'END-USER-PROMPT',
+ 'ENDKEY',
+ 'ENTERED',
+ 'ENTITY-EXPANSION-LIMIT',
+ 'ENTRY',
+ 'ENUM',
+ 'EQ',
+ 'ERROR',
+ 'ERROR-COL',
+ 'ERROR-COLU',
+ 'ERROR-COLUM',
+ 'ERROR-COLUMN',
+ 'ERROR-ROW',
+ 'ERROR-STACK-TRACE',
+ 'ERROR-STAT',
+ 'ERROR-STATU',
+ 'ERROR-STATUS',
+ 'ESCAPE',
+ 'ETIME',
+ 'EVENT',
+ 'EVENT-GROUP-ID',
+ 'EVENT-PROCEDURE',
+ 'EVENT-PROCEDURE-CONTEXT',
+ 'EVENT-T',
+ 'EVENT-TY',
+ 'EVENT-TYP',
+ 'EVENT-TYPE',
+ 'EVENTS',
+ 'EXCEPT',
+ 'EXCLUSIVE',
+ 'EXCLUSIVE-',
+ 'EXCLUSIVE-ID',
+ 'EXCLUSIVE-L',
+ 'EXCLUSIVE-LO',
+ 'EXCLUSIVE-LOC',
+ 'EXCLUSIVE-LOCK',
+ 'EXCLUSIVE-WEB-USER',
+ 'EXECUTE',
+ 'EXISTS',
+ 'EXP',
+ 'EXPAND',
+ 'EXPANDABLE',
+ 'EXPLICIT',
+ 'EXPORT',
+ 'EXPORT-PRINCIPAL',
+ 'EXTENDED',
+ 'EXTENT',
+ 'EXTERNAL',
+ 'FALSE',
+ 'FETCH',
+ 'FETCH-SELECTED-ROW',
+ 'FGC',
+ 'FGCO',
+ 'FGCOL',
+ 'FGCOLO',
+ 'FGCOLOR',
+ 'FIELD',
+ 'FIELDS',
+ 'FILE',
+ 'FILE-CREATE-DATE',
+ 'FILE-CREATE-TIME',
+ 'FILE-INFO',
+ 'FILE-INFOR',
+ 'FILE-INFORM',
+ 'FILE-INFORMA',
+ 'FILE-INFORMAT',
+ 'FILE-INFORMATI',
+ 'FILE-INFORMATIO',
+ 'FILE-INFORMATION',
+ 'FILE-MOD-DATE',
+ 'FILE-MOD-TIME',
+ 'FILE-NAME',
+ 'FILE-OFF',
+ 'FILE-OFFS',
+ 'FILE-OFFSE',
+ 'FILE-OFFSET',
+ 'FILE-SIZE',
+ 'FILE-TYPE',
+ 'FILENAME',
+ 'FILL',
+ 'FILL-IN',
+ 'FILLED',
+ 'FILTERS',
+ 'FINAL',
+ 'FINALLY',
+ 'FIND',
+ 'FIND-BY-ROWID',
+ 'FIND-CASE-SENSITIVE',
+ 'FIND-CURRENT',
+ 'FIND-FIRST',
+ 'FIND-GLOBAL',
+ 'FIND-LAST',
+ 'FIND-NEXT-OCCURRENCE',
+ 'FIND-PREV-OCCURRENCE',
+ 'FIND-SELECT',
+ 'FIND-UNIQUE',
+ 'FIND-WRAP-AROUND',
+ 'FINDER',
+ 'FIRST',
+ 'FIRST-ASYNCH-REQUEST',
+ 'FIRST-CHILD',
+ 'FIRST-COLUMN',
+ 'FIRST-FORM',
+ 'FIRST-OBJECT',
+ 'FIRST-OF',
+ 'FIRST-PROC',
+ 'FIRST-PROCE',
+ 'FIRST-PROCED',
+ 'FIRST-PROCEDU',
+ 'FIRST-PROCEDUR',
+ 'FIRST-PROCEDURE',
+ 'FIRST-SERVER',
+ 'FIRST-TAB-I',
+ 'FIRST-TAB-IT',
+ 'FIRST-TAB-ITE',
+ 'FIRST-TAB-ITEM',
+ 'FIT-LAST-COLUMN',
+ 'FIXED-ONLY',
+ 'FLAT-BUTTON',
+ 'FLOAT',
+ 'FOCUS',
+ 'FOCUSED-ROW',
+ 'FOCUSED-ROW-SELECTED',
+ 'FONT',
+ 'FONT-TABLE',
+ 'FOR',
+ 'FORCE-FILE',
+ 'FORE',
+ 'FOREG',
+ 'FOREGR',
+ 'FOREGRO',
+ 'FOREGROU',
+ 'FOREGROUN',
+ 'FOREGROUND',
+ 'FORM INPUT',
+ 'FORM',
+ 'FORM-LONG-INPUT',
+ 'FORMA',
+ 'FORMAT',
+ 'FORMATTE',
+ 'FORMATTED',
+ 'FORWARD',
+ 'FORWARDS',
+ 'FRAGMEN',
+ 'FRAGMENT',
+ 'FRAM',
+ 'FRAME',
+ 'FRAME-COL',
+ 'FRAME-DB',
+ 'FRAME-DOWN',
+ 'FRAME-FIELD',
+ 'FRAME-FILE',
+ 'FRAME-INDE',
+ 'FRAME-INDEX',
+ 'FRAME-LINE',
+ 'FRAME-NAME',
+ 'FRAME-ROW',
+ 'FRAME-SPA',
+ 'FRAME-SPAC',
+ 'FRAME-SPACI',
+ 'FRAME-SPACIN',
+ 'FRAME-SPACING',
+ 'FRAME-VAL',
+ 'FRAME-VALU',
+ 'FRAME-VALUE',
+ 'FRAME-X',
+ 'FRAME-Y',
+ 'FREQUENCY',
+ 'FROM',
+ 'FROM-C',
+ 'FROM-CH',
+ 'FROM-CHA',
+ 'FROM-CHAR',
+ 'FROM-CHARS',
+ 'FROM-CUR',
+ 'FROM-CURR',
+ 'FROM-CURRE',
+ 'FROM-CURREN',
+ 'FROM-CURRENT',
+ 'FROM-P',
+ 'FROM-PI',
+ 'FROM-PIX',
+ 'FROM-PIXE',
+ 'FROM-PIXEL',
+ 'FROM-PIXELS',
+ 'FULL-HEIGHT',
+ 'FULL-HEIGHT-',
+ 'FULL-HEIGHT-C',
+ 'FULL-HEIGHT-CH',
+ 'FULL-HEIGHT-CHA',
+ 'FULL-HEIGHT-CHAR',
+ 'FULL-HEIGHT-CHARS',
+ 'FULL-HEIGHT-P',
+ 'FULL-HEIGHT-PI',
+ 'FULL-HEIGHT-PIX',
+ 'FULL-HEIGHT-PIXE',
+ 'FULL-HEIGHT-PIXEL',
+ 'FULL-HEIGHT-PIXELS',
+ 'FULL-PATHN',
+ 'FULL-PATHNA',
+ 'FULL-PATHNAM',
+ 'FULL-PATHNAME',
+ 'FULL-WIDTH',
+ 'FULL-WIDTH-',
+ 'FULL-WIDTH-C',
+ 'FULL-WIDTH-CH',
+ 'FULL-WIDTH-CHA',
+ 'FULL-WIDTH-CHAR',
+ 'FULL-WIDTH-CHARS',
+ 'FULL-WIDTH-P',
+ 'FULL-WIDTH-PI',
+ 'FULL-WIDTH-PIX',
+ 'FULL-WIDTH-PIXE',
+ 'FULL-WIDTH-PIXEL',
+ 'FULL-WIDTH-PIXELS',
+ 'FUNCTION',
+ 'FUNCTION-CALL-TYPE',
+ 'GATEWAY',
+ 'GATEWAYS',
+ 'GE',
+ 'GENERATE-MD5',
+ 'GENERATE-PBE-KEY',
+ 'GENERATE-PBE-SALT',
+ 'GENERATE-RANDOM-KEY',
+ 'GENERATE-UUID',
+ 'GET',
+ 'GET-ATTR-CALL-TYPE',
+ 'GET-ATTRIBUTE-NODE',
+ 'GET-BINARY-DATA',
+ 'GET-BLUE',
+ 'GET-BLUE-',
+ 'GET-BLUE-V',
+ 'GET-BLUE-VA',
+ 'GET-BLUE-VAL',
+ 'GET-BLUE-VALU',
+ 'GET-BLUE-VALUE',
+ 'GET-BROWSE-COLUMN',
+ 'GET-BUFFER-HANDLE',
+ 'GET-BYTE',
+ 'GET-CALLBACK-PROC-CONTEXT',
+ 'GET-CALLBACK-PROC-NAME',
+ 'GET-CGI-LIST',
+ 'GET-CGI-LONG-VALUE',
+ 'GET-CGI-VALUE',
+ 'GET-CLASS',
+ 'GET-CODEPAGES',
+ 'GET-COLLATIONS',
+ 'GET-CONFIG-VALUE',
+ 'GET-CURRENT',
+ 'GET-DOUBLE',
+ 'GET-DROPPED-FILE',
+ 'GET-DYNAMIC',
+ 'GET-ERROR-COLUMN',
+ 'GET-ERROR-ROW',
+ 'GET-FILE',
+ 'GET-FILE-NAME',
+ 'GET-FILE-OFFSE',
+ 'GET-FILE-OFFSET',
+ 'GET-FIRST',
+ 'GET-FLOAT',
+ 'GET-GREEN',
+ 'GET-GREEN-',
+ 'GET-GREEN-V',
+ 'GET-GREEN-VA',
+ 'GET-GREEN-VAL',
+ 'GET-GREEN-VALU',
+ 'GET-GREEN-VALUE',
+ 'GET-INDEX-BY-NAMESPACE-NAME',
+ 'GET-INDEX-BY-QNAME',
+ 'GET-INT64',
+ 'GET-ITERATION',
+ 'GET-KEY-VAL',
+ 'GET-KEY-VALU',
+ 'GET-KEY-VALUE',
+ 'GET-LAST',
+ 'GET-LOCALNAME-BY-INDEX',
+ 'GET-LONG',
+ 'GET-MESSAGE',
+ 'GET-NEXT',
+ 'GET-NUMBER',
+ 'GET-POINTER-VALUE',
+ 'GET-PREV',
+ 'GET-PRINTERS',
+ 'GET-PROPERTY',
+ 'GET-QNAME-BY-INDEX',
+ 'GET-RED',
+ 'GET-RED-',
+ 'GET-RED-V',
+ 'GET-RED-VA',
+ 'GET-RED-VAL',
+ 'GET-RED-VALU',
+ 'GET-RED-VALUE',
+ 'GET-REPOSITIONED-ROW',
+ 'GET-RGB-VALUE',
+ 'GET-SELECTED',
+ 'GET-SELECTED-',
+ 'GET-SELECTED-W',
+ 'GET-SELECTED-WI',
+ 'GET-SELECTED-WID',
+ 'GET-SELECTED-WIDG',
+ 'GET-SELECTED-WIDGE',
+ 'GET-SELECTED-WIDGET',
+ 'GET-SHORT',
+ 'GET-SIGNATURE',
+ 'GET-SIZE',
+ 'GET-STRING',
+ 'GET-TAB-ITEM',
+ 'GET-TEXT-HEIGHT',
+ 'GET-TEXT-HEIGHT-',
+ 'GET-TEXT-HEIGHT-C',
+ 'GET-TEXT-HEIGHT-CH',
+ 'GET-TEXT-HEIGHT-CHA',
+ 'GET-TEXT-HEIGHT-CHAR',
+ 'GET-TEXT-HEIGHT-CHARS',
+ 'GET-TEXT-HEIGHT-P',
+ 'GET-TEXT-HEIGHT-PI',
+ 'GET-TEXT-HEIGHT-PIX',
+ 'GET-TEXT-HEIGHT-PIXE',
+ 'GET-TEXT-HEIGHT-PIXEL',
+ 'GET-TEXT-HEIGHT-PIXELS',
+ 'GET-TEXT-WIDTH',
+ 'GET-TEXT-WIDTH-',
+ 'GET-TEXT-WIDTH-C',
+ 'GET-TEXT-WIDTH-CH',
+ 'GET-TEXT-WIDTH-CHA',
+ 'GET-TEXT-WIDTH-CHAR',
+ 'GET-TEXT-WIDTH-CHARS',
+ 'GET-TEXT-WIDTH-P',
+ 'GET-TEXT-WIDTH-PI',
+ 'GET-TEXT-WIDTH-PIX',
+ 'GET-TEXT-WIDTH-PIXE',
+ 'GET-TEXT-WIDTH-PIXEL',
+ 'GET-TEXT-WIDTH-PIXELS',
+ 'GET-TYPE-BY-INDEX',
+ 'GET-TYPE-BY-NAMESPACE-NAME',
+ 'GET-TYPE-BY-QNAME',
+ 'GET-UNSIGNED-LONG',
+ 'GET-UNSIGNED-SHORT',
+ 'GET-URI-BY-INDEX',
+ 'GET-VALUE-BY-INDEX',
+ 'GET-VALUE-BY-NAMESPACE-NAME',
+ 'GET-VALUE-BY-QNAME',
+ 'GET-WAIT-STATE',
+ 'GETBYTE',
+ 'GLOBAL',
+ 'GO-ON',
+ 'GO-PEND',
+ 'GO-PENDI',
+ 'GO-PENDIN',
+ 'GO-PENDING',
+ 'GRANT',
+ 'GRAPHIC-E',
+ 'GRAPHIC-ED',
+ 'GRAPHIC-EDG',
+ 'GRAPHIC-EDGE',
+ 'GRID-FACTOR-H',
+ 'GRID-FACTOR-HO',
+ 'GRID-FACTOR-HOR',
+ 'GRID-FACTOR-HORI',
+ 'GRID-FACTOR-HORIZ',
+ 'GRID-FACTOR-HORIZO',
+ 'GRID-FACTOR-HORIZON',
+ 'GRID-FACTOR-HORIZONT',
+ 'GRID-FACTOR-HORIZONTA',
+ 'GRID-FACTOR-HORIZONTAL',
+ 'GRID-FACTOR-V',
+ 'GRID-FACTOR-VE',
+ 'GRID-FACTOR-VER',
+ 'GRID-FACTOR-VERT',
+ 'GRID-FACTOR-VERTI',
+ 'GRID-FACTOR-VERTIC',
+ 'GRID-FACTOR-VERTICA',
+ 'GRID-FACTOR-VERTICAL',
+ 'GRID-SNAP',
+ 'GRID-UNIT-HEIGHT',
+ 'GRID-UNIT-HEIGHT-',
+ 'GRID-UNIT-HEIGHT-C',
+ 'GRID-UNIT-HEIGHT-CH',
+ 'GRID-UNIT-HEIGHT-CHA',
+ 'GRID-UNIT-HEIGHT-CHARS',
+ 'GRID-UNIT-HEIGHT-P',
+ 'GRID-UNIT-HEIGHT-PI',
+ 'GRID-UNIT-HEIGHT-PIX',
+ 'GRID-UNIT-HEIGHT-PIXE',
+ 'GRID-UNIT-HEIGHT-PIXEL',
+ 'GRID-UNIT-HEIGHT-PIXELS',
+ 'GRID-UNIT-WIDTH',
+ 'GRID-UNIT-WIDTH-',
+ 'GRID-UNIT-WIDTH-C',
+ 'GRID-UNIT-WIDTH-CH',
+ 'GRID-UNIT-WIDTH-CHA',
+ 'GRID-UNIT-WIDTH-CHAR',
+ 'GRID-UNIT-WIDTH-CHARS',
+ 'GRID-UNIT-WIDTH-P',
+ 'GRID-UNIT-WIDTH-PI',
+ 'GRID-UNIT-WIDTH-PIX',
+ 'GRID-UNIT-WIDTH-PIXE',
+ 'GRID-UNIT-WIDTH-PIXEL',
+ 'GRID-UNIT-WIDTH-PIXELS',
+ 'GRID-VISIBLE',
+ 'GROUP',
+ 'GT',
+ 'GUID',
+ 'HANDLE',
+ 'HANDLER',
+ 'HAS-RECORDS',
+ 'HAVING',
+ 'HEADER',
+ 'HEIGHT',
+ 'HEIGHT-',
+ 'HEIGHT-C',
+ 'HEIGHT-CH',
+ 'HEIGHT-CHA',
+ 'HEIGHT-CHAR',
+ 'HEIGHT-CHARS',
+ 'HEIGHT-P',
+ 'HEIGHT-PI',
+ 'HEIGHT-PIX',
+ 'HEIGHT-PIXE',
+ 'HEIGHT-PIXEL',
+ 'HEIGHT-PIXELS',
+ 'HELP',
+ 'HEX-DECODE',
+ 'HEX-ENCODE',
+ 'HIDDEN',
+ 'HIDE',
+ 'HORI',
+ 'HORIZ',
+ 'HORIZO',
+ 'HORIZON',
+ 'HORIZONT',
+ 'HORIZONTA',
+ 'HORIZONTAL',
+ 'HOST-BYTE-ORDER',
+ 'HTML-CHARSET',
+ 'HTML-END-OF-LINE',
+ 'HTML-END-OF-PAGE',
+ 'HTML-FRAME-BEGIN',
+ 'HTML-FRAME-END',
+ 'HTML-HEADER-BEGIN',
+ 'HTML-HEADER-END',
+ 'HTML-TITLE-BEGIN',
+ 'HTML-TITLE-END',
+ 'HWND',
+ 'ICON',
+ 'IF',
+ 'IMAGE',
+ 'IMAGE-DOWN',
+ 'IMAGE-INSENSITIVE',
+ 'IMAGE-SIZE',
+ 'IMAGE-SIZE-C',
+ 'IMAGE-SIZE-CH',
+ 'IMAGE-SIZE-CHA',
+ 'IMAGE-SIZE-CHAR',
+ 'IMAGE-SIZE-CHARS',
+ 'IMAGE-SIZE-P',
+ 'IMAGE-SIZE-PI',
+ 'IMAGE-SIZE-PIX',
+ 'IMAGE-SIZE-PIXE',
+ 'IMAGE-SIZE-PIXEL',
+ 'IMAGE-SIZE-PIXELS',
+ 'IMAGE-UP',
+ 'IMMEDIATE-DISPLAY',
+ 'IMPLEMENTS',
+ 'IMPORT',
+ 'IMPORT-PRINCIPAL',
+ 'IN',
+ 'IN-HANDLE',
+ 'INCREMENT-EXCLUSIVE-ID',
+ 'INDEX',
+ 'INDEX-HINT',
+ 'INDEX-INFORMATION',
+ 'INDEXED-REPOSITION',
+ 'INDICATOR',
+ 'INFO',
+ 'INFOR',
+ 'INFORM',
+ 'INFORMA',
+ 'INFORMAT',
+ 'INFORMATI',
+ 'INFORMATIO',
+ 'INFORMATION',
+ 'INHERIT-BGC',
+ 'INHERIT-BGCO',
+ 'INHERIT-BGCOL',
+ 'INHERIT-BGCOLO',
+ 'INHERIT-BGCOLOR',
+ 'INHERIT-FGC',
+ 'INHERIT-FGCO',
+ 'INHERIT-FGCOL',
+ 'INHERIT-FGCOLO',
+ 'INHERIT-FGCOLOR',
+ 'INHERITS',
+ 'INIT',
+ 'INITI',
+ 'INITIA',
+ 'INITIAL',
+ 'INITIAL-DIR',
+ 'INITIAL-FILTER',
+ 'INITIALIZE-DOCUMENT-TYPE',
+ 'INITIATE',
+ 'INNER-CHARS',
+ 'INNER-LINES',
+ 'INPUT',
+ 'INPUT-O',
+ 'INPUT-OU',
+ 'INPUT-OUT',
+ 'INPUT-OUTP',
+ 'INPUT-OUTPU',
+ 'INPUT-OUTPUT',
+ 'INPUT-VALUE',
+ 'INSERT',
+ 'INSERT-ATTRIBUTE',
+ 'INSERT-B',
+ 'INSERT-BA',
+ 'INSERT-BAC',
+ 'INSERT-BACK',
+ 'INSERT-BACKT',
+ 'INSERT-BACKTA',
+ 'INSERT-BACKTAB',
+ 'INSERT-FILE',
+ 'INSERT-ROW',
+ 'INSERT-STRING',
+ 'INSERT-T',
+ 'INSERT-TA',
+ 'INSERT-TAB',
+ 'INT64',
+ 'INT',
+ 'INTEGER',
+ 'INTERFACE',
+ 'INTERNAL-ENTRIES',
+ 'INTO',
+ 'INVOKE',
+ 'IS',
+ 'IS-ATTR',
+ 'IS-ATTR-',
+ 'IS-ATTR-S',
+ 'IS-ATTR-SP',
+ 'IS-ATTR-SPA',
+ 'IS-ATTR-SPAC',
+ 'IS-ATTR-SPACE',
+ 'IS-CLASS',
+ 'IS-JSON',
+ 'IS-LEAD-BYTE',
+ 'IS-OPEN',
+ 'IS-PARAMETER-SET',
+ 'IS-PARTITIONED',
+ 'IS-ROW-SELECTED',
+ 'IS-SELECTED',
+ 'IS-XML',
+ 'ITEM',
+ 'ITEMS-PER-ROW',
+ 'JOIN',
+ 'JOIN-BY-SQLDB',
+ 'KBLABEL',
+ 'KEEP-CONNECTION-OPEN',
+ 'KEEP-FRAME-Z',
+ 'KEEP-FRAME-Z-',
+ 'KEEP-FRAME-Z-O',
+ 'KEEP-FRAME-Z-OR',
+ 'KEEP-FRAME-Z-ORD',
+ 'KEEP-FRAME-Z-ORDE',
+ 'KEEP-FRAME-Z-ORDER',
+ 'KEEP-MESSAGES',
+ 'KEEP-SECURITY-CACHE',
+ 'KEEP-TAB-ORDER',
+ 'KEY',
+ 'KEY-CODE',
+ 'KEY-FUNC',
+ 'KEY-FUNCT',
+ 'KEY-FUNCTI',
+ 'KEY-FUNCTIO',
+ 'KEY-FUNCTION',
+ 'KEY-LABEL',
+ 'KEYCODE',
+ 'KEYFUNC',
+ 'KEYFUNCT',
+ 'KEYFUNCTI',
+ 'KEYFUNCTIO',
+ 'KEYFUNCTION',
+ 'KEYLABEL',
+ 'KEYS',
+ 'KEYWORD',
+ 'KEYWORD-ALL',
+ 'LABEL',
+ 'LABEL-BGC',
+ 'LABEL-BGCO',
+ 'LABEL-BGCOL',
+ 'LABEL-BGCOLO',
+ 'LABEL-BGCOLOR',
+ 'LABEL-DC',
+ 'LABEL-DCO',
+ 'LABEL-DCOL',
+ 'LABEL-DCOLO',
+ 'LABEL-DCOLOR',
+ 'LABEL-FGC',
+ 'LABEL-FGCO',
+ 'LABEL-FGCOL',
+ 'LABEL-FGCOLO',
+ 'LABEL-FGCOLOR',
+ 'LABEL-FONT',
+ 'LABEL-PFC',
+ 'LABEL-PFCO',
+ 'LABEL-PFCOL',
+ 'LABEL-PFCOLO',
+ 'LABEL-PFCOLOR',
+ 'LABELS',
+ 'LABELS-HAVE-COLONS',
+ 'LANDSCAPE',
+ 'LANGUAGE',
+ 'LANGUAGES',
+ 'LARGE',
+ 'LARGE-TO-SMALL',
+ 'LAST',
+ 'LAST-ASYNCH-REQUEST',
+ 'LAST-BATCH',
+ 'LAST-CHILD',
+ 'LAST-EVEN',
+ 'LAST-EVENT',
+ 'LAST-FORM',
+ 'LAST-KEY',
+ 'LAST-OBJECT',
+ 'LAST-OF',
+ 'LAST-PROCE',
+ 'LAST-PROCED',
+ 'LAST-PROCEDU',
+ 'LAST-PROCEDUR',
+ 'LAST-PROCEDURE',
+ 'LAST-SERVER',
+ 'LAST-TAB-I',
+ 'LAST-TAB-IT',
+ 'LAST-TAB-ITE',
+ 'LAST-TAB-ITEM',
+ 'LASTKEY',
+ 'LC',
+ 'LDBNAME',
+ 'LE',
+ 'LEAVE',
+ 'LEFT-ALIGN',
+ 'LEFT-ALIGNE',
+ 'LEFT-ALIGNED',
+ 'LEFT-TRIM',
+ 'LENGTH',
+ 'LIBRARY',
+ 'LIKE',
+ 'LIKE-SEQUENTIAL',
+ 'LINE',
+ 'LINE-COUNT',
+ 'LINE-COUNTE',
+ 'LINE-COUNTER',
+ 'LIST-EVENTS',
+ 'LIST-ITEM-PAIRS',
+ 'LIST-ITEMS',
+ 'LIST-PROPERTY-NAMES',
+ 'LIST-QUERY-ATTRS',
+ 'LIST-SET-ATTRS',
+ 'LIST-WIDGETS',
+ 'LISTI',
+ 'LISTIN',
+ 'LISTING',
+ 'LITERAL-QUESTION',
+ 'LITTLE-ENDIAN',
+ 'LOAD',
+ 'LOAD-DOMAINS',
+ 'LOAD-ICON',
+ 'LOAD-IMAGE',
+ 'LOAD-IMAGE-DOWN',
+ 'LOAD-IMAGE-INSENSITIVE',
+ 'LOAD-IMAGE-UP',
+ 'LOAD-MOUSE-P',
+ 'LOAD-MOUSE-PO',
+ 'LOAD-MOUSE-POI',
+ 'LOAD-MOUSE-POIN',
+ 'LOAD-MOUSE-POINT',
+ 'LOAD-MOUSE-POINTE',
+ 'LOAD-MOUSE-POINTER',
+ 'LOAD-PICTURE',
+ 'LOAD-SMALL-ICON',
+ 'LOCAL-NAME',
+ 'LOCAL-VERSION-INFO',
+ 'LOCATOR-COLUMN-NUMBER',
+ 'LOCATOR-LINE-NUMBER',
+ 'LOCATOR-PUBLIC-ID',
+ 'LOCATOR-SYSTEM-ID',
+ 'LOCATOR-TYPE',
+ 'LOCK-REGISTRATION',
+ 'LOCKED',
+ 'LOG',
+ 'LOG-AUDIT-EVENT',
+ 'LOG-MANAGER',
+ 'LOGICAL',
+ 'LOGIN-EXPIRATION-TIMESTAMP',
+ 'LOGIN-HOST',
+ 'LOGIN-STATE',
+ 'LOGOUT',
+ 'LONGCHAR',
+ 'LOOKAHEAD',
+ 'LOOKUP',
+ 'LT',
+ 'MACHINE-CLASS',
+ 'MANDATORY',
+ 'MANUAL-HIGHLIGHT',
+ 'MAP',
+ 'MARGIN-EXTRA',
+ 'MARGIN-HEIGHT',
+ 'MARGIN-HEIGHT-',
+ 'MARGIN-HEIGHT-C',
+ 'MARGIN-HEIGHT-CH',
+ 'MARGIN-HEIGHT-CHA',
+ 'MARGIN-HEIGHT-CHAR',
+ 'MARGIN-HEIGHT-CHARS',
+ 'MARGIN-HEIGHT-P',
+ 'MARGIN-HEIGHT-PI',
+ 'MARGIN-HEIGHT-PIX',
+ 'MARGIN-HEIGHT-PIXE',
+ 'MARGIN-HEIGHT-PIXEL',
+ 'MARGIN-HEIGHT-PIXELS',
+ 'MARGIN-WIDTH',
+ 'MARGIN-WIDTH-',
+ 'MARGIN-WIDTH-C',
+ 'MARGIN-WIDTH-CH',
+ 'MARGIN-WIDTH-CHA',
+ 'MARGIN-WIDTH-CHAR',
+ 'MARGIN-WIDTH-CHARS',
+ 'MARGIN-WIDTH-P',
+ 'MARGIN-WIDTH-PI',
+ 'MARGIN-WIDTH-PIX',
+ 'MARGIN-WIDTH-PIXE',
+ 'MARGIN-WIDTH-PIXEL',
+ 'MARGIN-WIDTH-PIXELS',
+ 'MARK-NEW',
+ 'MARK-ROW-STATE',
+ 'MATCHES',
+ 'MAX',
+ 'MAX-BUTTON',
+ 'MAX-CHARS',
+ 'MAX-DATA-GUESS',
+ 'MAX-HEIGHT',
+ 'MAX-HEIGHT-C',
+ 'MAX-HEIGHT-CH',
+ 'MAX-HEIGHT-CHA',
+ 'MAX-HEIGHT-CHAR',
+ 'MAX-HEIGHT-CHARS',
+ 'MAX-HEIGHT-P',
+ 'MAX-HEIGHT-PI',
+ 'MAX-HEIGHT-PIX',
+ 'MAX-HEIGHT-PIXE',
+ 'MAX-HEIGHT-PIXEL',
+ 'MAX-HEIGHT-PIXELS',
+ 'MAX-ROWS',
+ 'MAX-SIZE',
+ 'MAX-VAL',
+ 'MAX-VALU',
+ 'MAX-VALUE',
+ 'MAX-WIDTH',
+ 'MAX-WIDTH-',
+ 'MAX-WIDTH-C',
+ 'MAX-WIDTH-CH',
+ 'MAX-WIDTH-CHA',
+ 'MAX-WIDTH-CHAR',
+ 'MAX-WIDTH-CHARS',
+ 'MAX-WIDTH-P',
+ 'MAX-WIDTH-PI',
+ 'MAX-WIDTH-PIX',
+ 'MAX-WIDTH-PIXE',
+ 'MAX-WIDTH-PIXEL',
+ 'MAX-WIDTH-PIXELS',
+ 'MAXI',
+ 'MAXIM',
+ 'MAXIMIZE',
+ 'MAXIMU',
+ 'MAXIMUM',
+ 'MAXIMUM-LEVEL',
+ 'MD5-DIGEST',
+ 'MEMBER',
+ 'MEMPTR-TO-NODE-VALUE',
+ 'MENU',
+ 'MENU-BAR',
+ 'MENU-ITEM',
+ 'MENU-K',
+ 'MENU-KE',
+ 'MENU-KEY',
+ 'MENU-M',
+ 'MENU-MO',
+ 'MENU-MOU',
+ 'MENU-MOUS',
+ 'MENU-MOUSE',
+ 'MENUBAR',
+ 'MERGE-BY-FIELD',
+ 'MESSAGE',
+ 'MESSAGE-AREA',
+ 'MESSAGE-AREA-FONT',
+ 'MESSAGE-LINES',
+ 'METHOD',
+ 'MIN',
+ 'MIN-BUTTON',
+ 'MIN-COLUMN-WIDTH-C',
+ 'MIN-COLUMN-WIDTH-CH',
+ 'MIN-COLUMN-WIDTH-CHA',
+ 'MIN-COLUMN-WIDTH-CHAR',
+ 'MIN-COLUMN-WIDTH-CHARS',
+ 'MIN-COLUMN-WIDTH-P',
+ 'MIN-COLUMN-WIDTH-PI',
+ 'MIN-COLUMN-WIDTH-PIX',
+ 'MIN-COLUMN-WIDTH-PIXE',
+ 'MIN-COLUMN-WIDTH-PIXEL',
+ 'MIN-COLUMN-WIDTH-PIXELS',
+ 'MIN-HEIGHT',
+ 'MIN-HEIGHT-',
+ 'MIN-HEIGHT-C',
+ 'MIN-HEIGHT-CH',
+ 'MIN-HEIGHT-CHA',
+ 'MIN-HEIGHT-CHAR',
+ 'MIN-HEIGHT-CHARS',
+ 'MIN-HEIGHT-P',
+ 'MIN-HEIGHT-PI',
+ 'MIN-HEIGHT-PIX',
+ 'MIN-HEIGHT-PIXE',
+ 'MIN-HEIGHT-PIXEL',
+ 'MIN-HEIGHT-PIXELS',
+ 'MIN-SIZE',
+ 'MIN-VAL',
+ 'MIN-VALU',
+ 'MIN-VALUE',
+ 'MIN-WIDTH',
+ 'MIN-WIDTH-',
+ 'MIN-WIDTH-C',
+ 'MIN-WIDTH-CH',
+ 'MIN-WIDTH-CHA',
+ 'MIN-WIDTH-CHAR',
+ 'MIN-WIDTH-CHARS',
+ 'MIN-WIDTH-P',
+ 'MIN-WIDTH-PI',
+ 'MIN-WIDTH-PIX',
+ 'MIN-WIDTH-PIXE',
+ 'MIN-WIDTH-PIXEL',
+ 'MIN-WIDTH-PIXELS',
+ 'MINI',
+ 'MINIM',
+ 'MINIMU',
+ 'MINIMUM',
+ 'MOD',
+ 'MODIFIED',
+ 'MODU',
+ 'MODUL',
+ 'MODULO',
+ 'MONTH',
+ 'MOUSE',
+ 'MOUSE-P',
+ 'MOUSE-PO',
+ 'MOUSE-POI',
+ 'MOUSE-POIN',
+ 'MOUSE-POINT',
+ 'MOUSE-POINTE',
+ 'MOUSE-POINTER',
+ 'MOVABLE',
+ 'MOVE-AFTER',
+ 'MOVE-AFTER-',
+ 'MOVE-AFTER-T',
+ 'MOVE-AFTER-TA',
+ 'MOVE-AFTER-TAB',
+ 'MOVE-AFTER-TAB-',
+ 'MOVE-AFTER-TAB-I',
+ 'MOVE-AFTER-TAB-IT',
+ 'MOVE-AFTER-TAB-ITE',
+ 'MOVE-AFTER-TAB-ITEM',
+ 'MOVE-BEFOR',
+ 'MOVE-BEFORE',
+ 'MOVE-BEFORE-',
+ 'MOVE-BEFORE-T',
+ 'MOVE-BEFORE-TA',
+ 'MOVE-BEFORE-TAB',
+ 'MOVE-BEFORE-TAB-',
+ 'MOVE-BEFORE-TAB-I',
+ 'MOVE-BEFORE-TAB-IT',
+ 'MOVE-BEFORE-TAB-ITE',
+ 'MOVE-BEFORE-TAB-ITEM',
+ 'MOVE-COL',
+ 'MOVE-COLU',
+ 'MOVE-COLUM',
+ 'MOVE-COLUMN',
+ 'MOVE-TO-B',
+ 'MOVE-TO-BO',
+ 'MOVE-TO-BOT',
+ 'MOVE-TO-BOTT',
+ 'MOVE-TO-BOTTO',
+ 'MOVE-TO-BOTTOM',
+ 'MOVE-TO-EOF',
+ 'MOVE-TO-T',
+ 'MOVE-TO-TO',
+ 'MOVE-TO-TOP',
+ 'MPE',
+ 'MTIME',
+ 'MULTI-COMPILE',
+ 'MULTIPLE',
+ 'MULTIPLE-KEY',
+ 'MULTITASKING-INTERVAL',
+ 'MUST-EXIST',
+ 'NAME',
+ 'NAMESPACE-PREFIX',
+ 'NAMESPACE-URI',
+ 'NATIVE',
+ 'NE',
+ 'NEEDS-APPSERVER-PROMPT',
+ 'NEEDS-PROMPT',
+ 'NEW',
+ 'NEW-INSTANCE',
+ 'NEW-ROW',
+ 'NEXT',
+ 'NEXT-COLUMN',
+ 'NEXT-PROMPT',
+ 'NEXT-ROWID',
+ 'NEXT-SIBLING',
+ 'NEXT-TAB-I',
+ 'NEXT-TAB-IT',
+ 'NEXT-TAB-ITE',
+ 'NEXT-TAB-ITEM',
+ 'NEXT-VALUE',
+ 'NO',
+ 'NO-APPLY',
+ 'NO-ARRAY-MESSAGE',
+ 'NO-ASSIGN',
+ 'NO-ATTR',
+ 'NO-ATTR-',
+ 'NO-ATTR-L',
+ 'NO-ATTR-LI',
+ 'NO-ATTR-LIS',
+ 'NO-ATTR-LIST',
+ 'NO-ATTR-S',
+ 'NO-ATTR-SP',
+ 'NO-ATTR-SPA',
+ 'NO-ATTR-SPAC',
+ 'NO-ATTR-SPACE',
+ 'NO-AUTO-VALIDATE',
+ 'NO-BIND-WHERE',
+ 'NO-BOX',
+ 'NO-CONSOLE',
+ 'NO-CONVERT',
+ 'NO-CONVERT-3D-COLORS',
+ 'NO-CURRENT-VALUE',
+ 'NO-DEBUG',
+ 'NO-DRAG',
+ 'NO-ECHO',
+ 'NO-EMPTY-SPACE',
+ 'NO-ERROR',
+ 'NO-F',
+ 'NO-FI',
+ 'NO-FIL',
+ 'NO-FILL',
+ 'NO-FOCUS',
+ 'NO-HELP',
+ 'NO-HIDE',
+ 'NO-INDEX-HINT',
+ 'NO-INHERIT-BGC',
+ 'NO-INHERIT-BGCO',
+ 'NO-INHERIT-BGCOLOR',
+ 'NO-INHERIT-FGC',
+ 'NO-INHERIT-FGCO',
+ 'NO-INHERIT-FGCOL',
+ 'NO-INHERIT-FGCOLO',
+ 'NO-INHERIT-FGCOLOR',
+ 'NO-JOIN-BY-SQLDB',
+ 'NO-LABE',
+ 'NO-LABELS',
+ 'NO-LOBS',
+ 'NO-LOCK',
+ 'NO-LOOKAHEAD',
+ 'NO-MAP',
+ 'NO-MES',
+ 'NO-MESS',
+ 'NO-MESSA',
+ 'NO-MESSAG',
+ 'NO-MESSAGE',
+ 'NO-PAUSE',
+ 'NO-PREFE',
+ 'NO-PREFET',
+ 'NO-PREFETC',
+ 'NO-PREFETCH',
+ 'NO-ROW-MARKERS',
+ 'NO-SCROLLBAR-VERTICAL',
+ 'NO-SEPARATE-CONNECTION',
+ 'NO-SEPARATORS',
+ 'NO-TAB-STOP',
+ 'NO-UND',
+ 'NO-UNDE',
+ 'NO-UNDER',
+ 'NO-UNDERL',
+ 'NO-UNDERLI',
+ 'NO-UNDERLIN',
+ 'NO-UNDERLINE',
+ 'NO-UNDO',
+ 'NO-VAL',
+ 'NO-VALI',
+ 'NO-VALID',
+ 'NO-VALIDA',
+ 'NO-VALIDAT',
+ 'NO-VALIDATE',
+ 'NO-WAIT',
+ 'NO-WORD-WRAP',
+ 'NODE-VALUE-TO-MEMPTR',
+ 'NONAMESPACE-SCHEMA-LOCATION',
+ 'NONE',
+ 'NORMALIZE',
+ 'NOT',
+ 'NOT-ACTIVE',
+ 'NOW',
+ 'NULL',
+ 'NUM-ALI',
+ 'NUM-ALIA',
+ 'NUM-ALIAS',
+ 'NUM-ALIASE',
+ 'NUM-ALIASES',
+ 'NUM-BUFFERS',
+ 'NUM-BUT',
+ 'NUM-BUTT',
+ 'NUM-BUTTO',
+ 'NUM-BUTTON',
+ 'NUM-BUTTONS',
+ 'NUM-COL',
+ 'NUM-COLU',
+ 'NUM-COLUM',
+ 'NUM-COLUMN',
+ 'NUM-COLUMNS',
+ 'NUM-COPIES',
+ 'NUM-DBS',
+ 'NUM-DROPPED-FILES',
+ 'NUM-ENTRIES',
+ 'NUM-FIELDS',
+ 'NUM-FORMATS',
+ 'NUM-ITEMS',
+ 'NUM-ITERATIONS',
+ 'NUM-LINES',
+ 'NUM-LOCKED-COL',
+ 'NUM-LOCKED-COLU',
+ 'NUM-LOCKED-COLUM',
+ 'NUM-LOCKED-COLUMN',
+ 'NUM-LOCKED-COLUMNS',
+ 'NUM-MESSAGES',
+ 'NUM-PARAMETERS',
+ 'NUM-REFERENCES',
+ 'NUM-REPLACED',
+ 'NUM-RESULTS',
+ 'NUM-SELECTED',
+ 'NUM-SELECTED-',
+ 'NUM-SELECTED-ROWS',
+ 'NUM-SELECTED-W',
+ 'NUM-SELECTED-WI',
+ 'NUM-SELECTED-WID',
+ 'NUM-SELECTED-WIDG',
+ 'NUM-SELECTED-WIDGE',
+ 'NUM-SELECTED-WIDGET',
+ 'NUM-SELECTED-WIDGETS',
+ 'NUM-TABS',
+ 'NUM-TO-RETAIN',
+ 'NUM-VISIBLE-COLUMNS',
+ 'NUMERIC',
+ 'NUMERIC-F',
+ 'NUMERIC-FO',
+ 'NUMERIC-FOR',
+ 'NUMERIC-FORM',
+ 'NUMERIC-FORMA',
+ 'NUMERIC-FORMAT',
+ 'OCTET-LENGTH',
+ 'OF',
+ 'OFF',
+ 'OK',
+ 'OK-CANCEL',
+ 'OLD',
+ 'ON',
+ 'ON-FRAME',
+ 'ON-FRAME-',
+ 'ON-FRAME-B',
+ 'ON-FRAME-BO',
+ 'ON-FRAME-BOR',
+ 'ON-FRAME-BORD',
+ 'ON-FRAME-BORDE',
+ 'ON-FRAME-BORDER',
+ 'OPEN',
+ 'OPSYS',
+ 'OPTION',
+ 'OR',
+ 'ORDERED-JOIN',
+ 'ORDINAL',
+ 'OS-APPEND',
+ 'OS-COMMAND',
+ 'OS-COPY',
+ 'OS-CREATE-DIR',
+ 'OS-DELETE',
+ 'OS-DIR',
+ 'OS-DRIVE',
+ 'OS-DRIVES',
+ 'OS-ERROR',
+ 'OS-GETENV',
+ 'OS-RENAME',
+ 'OTHERWISE',
+ 'OUTPUT',
+ 'OVERLAY',
+ 'OVERRIDE',
+ 'OWNER',
+ 'PAGE',
+ 'PAGE-BOT',
+ 'PAGE-BOTT',
+ 'PAGE-BOTTO',
+ 'PAGE-BOTTOM',
+ 'PAGE-NUM',
+ 'PAGE-NUMB',
+ 'PAGE-NUMBE',
+ 'PAGE-NUMBER',
+ 'PAGE-SIZE',
+ 'PAGE-TOP',
+ 'PAGE-WID',
+ 'PAGE-WIDT',
+ 'PAGE-WIDTH',
+ 'PAGED',
+ 'PARAM',
+ 'PARAME',
+ 'PARAMET',
+ 'PARAMETE',
+ 'PARAMETER',
+ 'PARENT',
+ 'PARSE-STATUS',
+ 'PARTIAL-KEY',
+ 'PASCAL',
+ 'PASSWORD-FIELD',
+ 'PATHNAME',
+ 'PAUSE',
+ 'PBE-HASH-ALG',
+ 'PBE-HASH-ALGO',
+ 'PBE-HASH-ALGOR',
+ 'PBE-HASH-ALGORI',
+ 'PBE-HASH-ALGORIT',
+ 'PBE-HASH-ALGORITH',
+ 'PBE-HASH-ALGORITHM',
+ 'PBE-KEY-ROUNDS',
+ 'PDBNAME',
+ 'PERSIST',
+ 'PERSISTE',
+ 'PERSISTEN',
+ 'PERSISTENT',
+ 'PERSISTENT-CACHE-DISABLED',
+ 'PFC',
+ 'PFCO',
+ 'PFCOL',
+ 'PFCOLO',
+ 'PFCOLOR',
+ 'PIXELS',
+ 'PIXELS-PER-COL',
+ 'PIXELS-PER-COLU',
+ 'PIXELS-PER-COLUM',
+ 'PIXELS-PER-COLUMN',
+ 'PIXELS-PER-ROW',
+ 'POPUP-M',
+ 'POPUP-ME',
+ 'POPUP-MEN',
+ 'POPUP-MENU',
+ 'POPUP-O',
+ 'POPUP-ON',
+ 'POPUP-ONL',
+ 'POPUP-ONLY',
+ 'PORTRAIT',
+ 'POSITION',
+ 'PRECISION',
+ 'PREFER-DATASET',
+ 'PREPARE-STRING',
+ 'PREPARED',
+ 'PREPROC',
+ 'PREPROCE',
+ 'PREPROCES',
+ 'PREPROCESS',
+ 'PRESEL',
+ 'PRESELE',
+ 'PRESELEC',
+ 'PRESELECT',
+ 'PREV',
+ 'PREV-COLUMN',
+ 'PREV-SIBLING',
+ 'PREV-TAB-I',
+ 'PREV-TAB-IT',
+ 'PREV-TAB-ITE',
+ 'PREV-TAB-ITEM',
+ 'PRIMARY',
+ 'PRINTER',
+ 'PRINTER-CONTROL-HANDLE',
+ 'PRINTER-HDC',
+ 'PRINTER-NAME',
+ 'PRINTER-PORT',
+ 'PRINTER-SETUP',
+ 'PRIVATE',
+ 'PRIVATE-D',
+ 'PRIVATE-DA',
+ 'PRIVATE-DAT',
+ 'PRIVATE-DATA',
+ 'PRIVILEGES',
+ 'PROC-HA',
+ 'PROC-HAN',
+ 'PROC-HAND',
+ 'PROC-HANDL',
+ 'PROC-HANDLE',
+ 'PROC-ST',
+ 'PROC-STA',
+ 'PROC-STAT',
+ 'PROC-STATU',
+ 'PROC-STATUS',
+ 'PROC-TEXT',
+ 'PROC-TEXT-BUFFER',
+ 'PROCE',
+ 'PROCED',
+ 'PROCEDU',
+ 'PROCEDUR',
+ 'PROCEDURE',
+ 'PROCEDURE-CALL-TYPE',
+ 'PROCEDURE-TYPE',
+ 'PROCESS',
+ 'PROFILER',
+ 'PROGRAM-NAME',
+ 'PROGRESS',
+ 'PROGRESS-S',
+ 'PROGRESS-SO',
+ 'PROGRESS-SOU',
+ 'PROGRESS-SOUR',
+ 'PROGRESS-SOURC',
+ 'PROGRESS-SOURCE',
+ 'PROMPT',
+ 'PROMPT-F',
+ 'PROMPT-FO',
+ 'PROMPT-FOR',
+ 'PROMSGS',
+ 'PROPATH',
+ 'PROPERTY',
+ 'PROTECTED',
+ 'PROVERS',
+ 'PROVERSI',
+ 'PROVERSIO',
+ 'PROVERSION',
+ 'PROXY',
+ 'PROXY-PASSWORD',
+ 'PROXY-USERID',
+ 'PUBLIC',
+ 'PUBLIC-ID',
+ 'PUBLISH',
+ 'PUBLISHED-EVENTS',
+ 'PUT',
+ 'PUT-BYTE',
+ 'PUT-DOUBLE',
+ 'PUT-FLOAT',
+ 'PUT-INT64',
+ 'PUT-KEY-VAL',
+ 'PUT-KEY-VALU',
+ 'PUT-KEY-VALUE',
+ 'PUT-LONG',
+ 'PUT-SHORT',
+ 'PUT-STRING',
+ 'PUT-UNSIGNED-LONG',
+ 'PUTBYTE',
+ 'QUERY',
+ 'QUERY-CLOSE',
+ 'QUERY-OFF-END',
+ 'QUERY-OPEN',
+ 'QUERY-PREPARE',
+ 'QUERY-TUNING',
+ 'QUESTION',
+ 'QUIT',
+ 'QUOTER',
+ 'R-INDEX',
+ 'RADIO-BUTTONS',
+ 'RADIO-SET',
+ 'RANDOM',
+ 'RAW',
+ 'RAW-TRANSFER',
+ 'RCODE-INFO',
+ 'RCODE-INFOR',
+ 'RCODE-INFORM',
+ 'RCODE-INFORMA',
+ 'RCODE-INFORMAT',
+ 'RCODE-INFORMATI',
+ 'RCODE-INFORMATIO',
+ 'RCODE-INFORMATION',
+ 'READ-AVAILABLE',
+ 'READ-EXACT-NUM',
+ 'READ-FILE',
+ 'READ-JSON',
+ 'READ-ONLY',
+ 'READ-XML',
+ 'READ-XMLSCHEMA',
+ 'READKEY',
+ 'REAL',
+ 'RECID',
+ 'RECORD-LENGTH',
+ 'RECT',
+ 'RECTA',
+ 'RECTAN',
+ 'RECTANG',
+ 'RECTANGL',
+ 'RECTANGLE',
+ 'RECURSIVE',
+ 'REFERENCE-ONLY',
+ 'REFRESH',
+ 'REFRESH-AUDIT-POLICY',
+ 'REFRESHABLE',
+ 'REGISTER-DOMAIN',
+ 'RELEASE',
+ 'REMOTE',
+ 'REMOVE-EVENTS-PROCEDURE',
+ 'REMOVE-SUPER-PROCEDURE',
+ 'REPEAT',
+ 'REPLACE',
+ 'REPLACE-SELECTION-TEXT',
+ 'REPOSITION',
+ 'REPOSITION-BACKWARD',
+ 'REPOSITION-FORWARD',
+ 'REPOSITION-MODE',
+ 'REPOSITION-TO-ROW',
+ 'REPOSITION-TO-ROWID',
+ 'REQUEST',
+ 'REQUEST-INFO',
+ 'RESET',
+ 'RESIZA',
+ 'RESIZAB',
+ 'RESIZABL',
+ 'RESIZABLE',
+ 'RESIZE',
+ 'RESPONSE-INFO',
+ 'RESTART-ROW',
+ 'RESTART-ROWID',
+ 'RETAIN',
+ 'RETAIN-SHAPE',
+ 'RETRY',
+ 'RETRY-CANCEL',
+ 'RETURN',
+ 'RETURN-ALIGN',
+ 'RETURN-ALIGNE',
+ 'RETURN-INS',
+ 'RETURN-INSE',
+ 'RETURN-INSER',
+ 'RETURN-INSERT',
+ 'RETURN-INSERTE',
+ 'RETURN-INSERTED',
+ 'RETURN-TO-START-DI',
+ 'RETURN-TO-START-DIR',
+ 'RETURN-VAL',
+ 'RETURN-VALU',
+ 'RETURN-VALUE',
+ 'RETURN-VALUE-DATA-TYPE',
+ 'RETURNS',
+ 'REVERSE-FROM',
+ 'REVERT',
+ 'REVOKE',
+ 'RGB-VALUE',
+ 'RIGHT-ALIGNED',
+ 'RIGHT-TRIM',
+ 'ROLES',
+ 'ROUND',
+ 'ROUTINE-LEVEL',
+ 'ROW',
+ 'ROW-HEIGHT-CHARS',
+ 'ROW-HEIGHT-PIXELS',
+ 'ROW-MARKERS',
+ 'ROW-OF',
+ 'ROW-RESIZABLE',
+ 'ROWID',
+ 'RULE',
+ 'RUN',
+ 'RUN-PROCEDURE',
+ 'SAVE CACHE',
+ 'SAVE',
+ 'SAVE-AS',
+ 'SAVE-FILE',
+ 'SAX-COMPLE',
+ 'SAX-COMPLET',
+ 'SAX-COMPLETE',
+ 'SAX-PARSE',
+ 'SAX-PARSE-FIRST',
+ 'SAX-PARSE-NEXT',
+ 'SAX-PARSER-ERROR',
+ 'SAX-RUNNING',
+ 'SAX-UNINITIALIZED',
+ 'SAX-WRITE-BEGIN',
+ 'SAX-WRITE-COMPLETE',
+ 'SAX-WRITE-CONTENT',
+ 'SAX-WRITE-ELEMENT',
+ 'SAX-WRITE-ERROR',
+ 'SAX-WRITE-IDLE',
+ 'SAX-WRITE-TAG',
+ 'SAX-WRITER',
+ 'SCHEMA',
+ 'SCHEMA-LOCATION',
+ 'SCHEMA-MARSHAL',
+ 'SCHEMA-PATH',
+ 'SCREEN',
+ 'SCREEN-IO',
+ 'SCREEN-LINES',
+ 'SCREEN-VAL',
+ 'SCREEN-VALU',
+ 'SCREEN-VALUE',
+ 'SCROLL',
+ 'SCROLL-BARS',
+ 'SCROLL-DELTA',
+ 'SCROLL-OFFSET',
+ 'SCROLL-TO-CURRENT-ROW',
+ 'SCROLL-TO-I',
+ 'SCROLL-TO-IT',
+ 'SCROLL-TO-ITE',
+ 'SCROLL-TO-ITEM',
+ 'SCROLL-TO-SELECTED-ROW',
+ 'SCROLLABLE',
+ 'SCROLLBAR-H',
+ 'SCROLLBAR-HO',
+ 'SCROLLBAR-HOR',
+ 'SCROLLBAR-HORI',
+ 'SCROLLBAR-HORIZ',
+ 'SCROLLBAR-HORIZO',
+ 'SCROLLBAR-HORIZON',
+ 'SCROLLBAR-HORIZONT',
+ 'SCROLLBAR-HORIZONTA',
+ 'SCROLLBAR-HORIZONTAL',
+ 'SCROLLBAR-V',
+ 'SCROLLBAR-VE',
+ 'SCROLLBAR-VER',
+ 'SCROLLBAR-VERT',
+ 'SCROLLBAR-VERTI',
+ 'SCROLLBAR-VERTIC',
+ 'SCROLLBAR-VERTICA',
+ 'SCROLLBAR-VERTICAL',
+ 'SCROLLED-ROW-POS',
+ 'SCROLLED-ROW-POSI',
+ 'SCROLLED-ROW-POSIT',
+ 'SCROLLED-ROW-POSITI',
+ 'SCROLLED-ROW-POSITIO',
+ 'SCROLLED-ROW-POSITION',
+ 'SCROLLING',
+ 'SDBNAME',
+ 'SEAL',
+ 'SEAL-TIMESTAMP',
+ 'SEARCH',
+ 'SEARCH-SELF',
+ 'SEARCH-TARGET',
+ 'SECTION',
+ 'SECURITY-POLICY',
+ 'SEEK',
+ 'SELECT',
+ 'SELECT-ALL',
+ 'SELECT-FOCUSED-ROW',
+ 'SELECT-NEXT-ROW',
+ 'SELECT-PREV-ROW',
+ 'SELECT-ROW',
+ 'SELECTABLE',
+ 'SELECTED',
+ 'SELECTION',
+ 'SELECTION-END',
+ 'SELECTION-LIST',
+ 'SELECTION-START',
+ 'SELECTION-TEXT',
+ 'SELF',
+ 'SEND',
+ 'SEND-SQL-STATEMENT',
+ 'SENSITIVE',
+ 'SEPARATE-CONNECTION',
+ 'SEPARATOR-FGCOLOR',
+ 'SEPARATORS',
+ 'SERIALIZABLE',
+ 'SERIALIZE-HIDDEN',
+ 'SERIALIZE-NAME',
+ 'SERVER',
+ 'SERVER-CONNECTION-BOUND',
+ 'SERVER-CONNECTION-BOUND-REQUEST',
+ 'SERVER-CONNECTION-CONTEXT',
+ 'SERVER-CONNECTION-ID',
+ 'SERVER-OPERATING-MODE',
+ 'SESSION',
+ 'SESSION-ID',
+ 'SET',
+ 'SET-APPL-CONTEXT',
+ 'SET-ATTR-CALL-TYPE',
+ 'SET-ATTRIBUTE-NODE',
+ 'SET-BLUE',
+ 'SET-BLUE-',
+ 'SET-BLUE-V',
+ 'SET-BLUE-VA',
+ 'SET-BLUE-VAL',
+ 'SET-BLUE-VALU',
+ 'SET-BLUE-VALUE',
+ 'SET-BREAK',
+ 'SET-BUFFERS',
+ 'SET-CALLBACK',
+ 'SET-CLIENT',
+ 'SET-COMMIT',
+ 'SET-CONTENTS',
+ 'SET-CURRENT-VALUE',
+ 'SET-DB-CLIENT',
+ 'SET-DYNAMIC',
+ 'SET-EVENT-MANAGER-OPTION',
+ 'SET-GREEN',
+ 'SET-GREEN-',
+ 'SET-GREEN-V',
+ 'SET-GREEN-VA',
+ 'SET-GREEN-VAL',
+ 'SET-GREEN-VALU',
+ 'SET-GREEN-VALUE',
+ 'SET-INPUT-SOURCE',
+ 'SET-OPTION',
+ 'SET-OUTPUT-DESTINATION',
+ 'SET-PARAMETER',
+ 'SET-POINTER-VALUE',
+ 'SET-PROPERTY',
+ 'SET-RED',
+ 'SET-RED-',
+ 'SET-RED-V',
+ 'SET-RED-VA',
+ 'SET-RED-VAL',
+ 'SET-RED-VALU',
+ 'SET-RED-VALUE',
+ 'SET-REPOSITIONED-ROW',
+ 'SET-RGB-VALUE',
+ 'SET-ROLLBACK',
+ 'SET-SELECTION',
+ 'SET-SIZE',
+ 'SET-SORT-ARROW',
+ 'SET-WAIT-STATE',
+ 'SETUSER',
+ 'SETUSERI',
+ 'SETUSERID',
+ 'SHA1-DIGEST',
+ 'SHARE',
+ 'SHARE-',
+ 'SHARE-L',
+ 'SHARE-LO',
+ 'SHARE-LOC',
+ 'SHARE-LOCK',
+ 'SHARED',
+ 'SHOW-IN-TASKBAR',
+ 'SHOW-STAT',
+ 'SHOW-STATS',
+ 'SIDE-LAB',
+ 'SIDE-LABE',
+ 'SIDE-LABEL',
+ 'SIDE-LABEL-H',
+ 'SIDE-LABEL-HA',
+ 'SIDE-LABEL-HAN',
+ 'SIDE-LABEL-HAND',
+ 'SIDE-LABEL-HANDL',
+ 'SIDE-LABEL-HANDLE',
+ 'SIDE-LABELS',
+ 'SIGNATURE',
+ 'SILENT',
+ 'SIMPLE',
+ 'SINGLE',
+ 'SINGLE-RUN',
+ 'SINGLETON',
+ 'SIZE',
+ 'SIZE-C',
+ 'SIZE-CH',
+ 'SIZE-CHA',
+ 'SIZE-CHAR',
+ 'SIZE-CHARS',
+ 'SIZE-P',
+ 'SIZE-PI',
+ 'SIZE-PIX',
+ 'SIZE-PIXE',
+ 'SIZE-PIXEL',
+ 'SIZE-PIXELS',
+ 'SKIP',
+ 'SKIP-DELETED-RECORD',
+ 'SLIDER',
+ 'SMALL-ICON',
+ 'SMALL-TITLE',
+ 'SMALLINT',
+ 'SOME',
+ 'SORT',
+ 'SORT-ASCENDING',
+ 'SORT-NUMBER',
+ 'SOURCE',
+ 'SOURCE-PROCEDURE',
+ 'SPACE',
+ 'SQL',
+ 'SQRT',
+ 'SSL-SERVER-NAME',
+ 'STANDALONE',
+ 'START',
+ 'START-DOCUMENT',
+ 'START-ELEMENT',
+ 'START-MOVE',
+ 'START-RESIZE',
+ 'START-ROW-RESIZE',
+ 'STATE-DETAIL',
+ 'STATIC',
+ 'STATUS',
+ 'STATUS-AREA',
+ 'STATUS-AREA-FONT',
+ 'STDCALL',
+ 'STOP',
+ 'STOP-AFTER',
+ 'STOP-PARSING',
+ 'STOPPE',
+ 'STOPPED',
+ 'STORED-PROC',
+ 'STORED-PROCE',
+ 'STORED-PROCED',
+ 'STORED-PROCEDU',
+ 'STORED-PROCEDUR',
+ 'STORED-PROCEDURE',
+ 'STREAM',
+ 'STREAM-HANDLE',
+ 'STREAM-IO',
+ 'STRETCH-TO-FIT',
+ 'STRICT',
+ 'STRICT-ENTITY-RESOLUTION',
+ 'STRING',
+ 'STRING-VALUE',
+ 'STRING-XREF',
+ 'SUB-AVE',
+ 'SUB-AVER',
+ 'SUB-AVERA',
+ 'SUB-AVERAG',
+ 'SUB-AVERAGE',
+ 'SUB-COUNT',
+ 'SUB-MAXIMUM',
+ 'SUB-MENU',
+ 'SUB-MIN',
+ 'SUB-MINIMUM',
+ 'SUB-TOTAL',
+ 'SUBSCRIBE',
+ 'SUBST',
+ 'SUBSTI',
+ 'SUBSTIT',
+ 'SUBSTITU',
+ 'SUBSTITUT',
+ 'SUBSTITUTE',
+ 'SUBSTR',
+ 'SUBSTRI',
+ 'SUBSTRIN',
+ 'SUBSTRING',
+ 'SUBTYPE',
+ 'SUM',
+ 'SUM-MAX',
+ 'SUM-MAXI',
+ 'SUM-MAXIM',
+ 'SUM-MAXIMU',
+ 'SUPER',
+ 'SUPER-PROCEDURES',
+ 'SUPPRESS-NAMESPACE-PROCESSING',
+ 'SUPPRESS-W',
+ 'SUPPRESS-WA',
+ 'SUPPRESS-WAR',
+ 'SUPPRESS-WARN',
+ 'SUPPRESS-WARNI',
+ 'SUPPRESS-WARNIN',
+ 'SUPPRESS-WARNING',
+ 'SUPPRESS-WARNINGS',
+ 'SYMMETRIC-ENCRYPTION-ALGORITHM',
+ 'SYMMETRIC-ENCRYPTION-IV',
+ 'SYMMETRIC-ENCRYPTION-KEY',
+ 'SYMMETRIC-SUPPORT',
+ 'SYSTEM-ALERT',
+ 'SYSTEM-ALERT-',
+ 'SYSTEM-ALERT-B',
+ 'SYSTEM-ALERT-BO',
+ 'SYSTEM-ALERT-BOX',
+ 'SYSTEM-ALERT-BOXE',
+ 'SYSTEM-ALERT-BOXES',
+ 'SYSTEM-DIALOG',
+ 'SYSTEM-HELP',
+ 'SYSTEM-ID',
+ 'TAB-POSITION',
+ 'TAB-STOP',
+ 'TABLE',
+ 'TABLE-HANDLE',
+ 'TABLE-NUMBER',
+ 'TABLE-SCAN',
+ 'TARGET',
+ 'TARGET-PROCEDURE',
+ 'TEMP-DIR',
+ 'TEMP-DIRE',
+ 'TEMP-DIREC',
+ 'TEMP-DIRECT',
+ 'TEMP-DIRECTO',
+ 'TEMP-DIRECTOR',
+ 'TEMP-DIRECTORY',
+ 'TEMP-TABLE',
+ 'TEMP-TABLE-PREPARE',
+ 'TERM',
+ 'TERMI',
+ 'TERMIN',
+ 'TERMINA',
+ 'TERMINAL',
+ 'TERMINATE',
+ 'TEXT',
+ 'TEXT-CURSOR',
+ 'TEXT-SEG-GROW',
+ 'TEXT-SELECTED',
+ 'THEN',
+ 'THIS-OBJECT',
+ 'THIS-PROCEDURE',
+ 'THREAD-SAFE',
+ 'THREE-D',
+ 'THROUGH',
+ 'THROW',
+ 'THRU',
+ 'TIC-MARKS',
+ 'TIME',
+ 'TIME-SOURCE',
+ 'TITLE',
+ 'TITLE-BGC',
+ 'TITLE-BGCO',
+ 'TITLE-BGCOL',
+ 'TITLE-BGCOLO',
+ 'TITLE-BGCOLOR',
+ 'TITLE-DC',
+ 'TITLE-DCO',
+ 'TITLE-DCOL',
+ 'TITLE-DCOLO',
+ 'TITLE-DCOLOR',
+ 'TITLE-FGC',
+ 'TITLE-FGCO',
+ 'TITLE-FGCOL',
+ 'TITLE-FGCOLO',
+ 'TITLE-FGCOLOR',
+ 'TITLE-FO',
+ 'TITLE-FON',
+ 'TITLE-FONT',
+ 'TO',
+ 'TO-ROWID',
+ 'TODAY',
+ 'TOGGLE-BOX',
+ 'TOOLTIP',
+ 'TOOLTIPS',
+ 'TOP-NAV-QUERY',
+ 'TOP-ONLY',
+ 'TOPIC',
+ 'TOTAL',
+ 'TRAILING',
+ 'TRANS',
+ 'TRANS-INIT-PROCEDURE',
+ 'TRANSACTION',
+ 'TRANSACTION-MODE',
+ 'TRANSPARENT',
+ 'TRIGGER',
+ 'TRIGGERS',
+ 'TRIM',
+ 'TRUE',
+ 'TRUNC',
+ 'TRUNCA',
+ 'TRUNCAT',
+ 'TRUNCATE',
+ 'TYPE',
+ 'TYPE-OF',
+ 'UNBOX',
+ 'UNBUFF',
+ 'UNBUFFE',
+ 'UNBUFFER',
+ 'UNBUFFERE',
+ 'UNBUFFERED',
+ 'UNDERL',
+ 'UNDERLI',
+ 'UNDERLIN',
+ 'UNDERLINE',
+ 'UNDO',
+ 'UNFORM',
+ 'UNFORMA',
+ 'UNFORMAT',
+ 'UNFORMATT',
+ 'UNFORMATTE',
+ 'UNFORMATTED',
+ 'UNION',
+ 'UNIQUE',
+ 'UNIQUE-ID',
+ 'UNIQUE-MATCH',
+ 'UNIX',
+ 'UNLESS-HIDDEN',
+ 'UNLOAD',
+ 'UNSIGNED-LONG',
+ 'UNSUBSCRIBE',
+ 'UP',
+ 'UPDATE',
+ 'UPDATE-ATTRIBUTE',
+ 'URL',
+ 'URL-DECODE',
+ 'URL-ENCODE',
+ 'URL-PASSWORD',
+ 'URL-USERID',
+ 'USE',
+ 'USE-DICT-EXPS',
+ 'USE-FILENAME',
+ 'USE-INDEX',
+ 'USE-REVVIDEO',
+ 'USE-TEXT',
+ 'USE-UNDERLINE',
+ 'USE-WIDGET-POOL',
+ 'USER',
+ 'USER-ID',
+ 'USERID',
+ 'USING',
+ 'V6DISPLAY',
+ 'V6FRAME',
+ 'VALID-EVENT',
+ 'VALID-HANDLE',
+ 'VALID-OBJECT',
+ 'VALIDATE',
+ 'VALIDATE-EXPRESSION',
+ 'VALIDATE-MESSAGE',
+ 'VALIDATE-SEAL',
+ 'VALIDATION-ENABLED',
+ 'VALUE',
+ 'VALUE-CHANGED',
+ 'VALUES',
+ 'VAR',
+ 'VARI',
+ 'VARIA',
+ 'VARIAB',
+ 'VARIABL',
+ 'VARIABLE',
+ 'VERBOSE',
+ 'VERSION',
+ 'VERT',
+ 'VERTI',
+ 'VERTIC',
+ 'VERTICA',
+ 'VERTICAL',
+ 'VIEW',
+ 'VIEW-AS',
+ 'VIEW-FIRST-COLUMN-ON-REOPEN',
+ 'VIRTUAL-HEIGHT',
+ 'VIRTUAL-HEIGHT-',
+ 'VIRTUAL-HEIGHT-C',
+ 'VIRTUAL-HEIGHT-CH',
+ 'VIRTUAL-HEIGHT-CHA',
+ 'VIRTUAL-HEIGHT-CHAR',
+ 'VIRTUAL-HEIGHT-CHARS',
+ 'VIRTUAL-HEIGHT-P',
+ 'VIRTUAL-HEIGHT-PI',
+ 'VIRTUAL-HEIGHT-PIX',
+ 'VIRTUAL-HEIGHT-PIXE',
+ 'VIRTUAL-HEIGHT-PIXEL',
+ 'VIRTUAL-HEIGHT-PIXELS',
+ 'VIRTUAL-WIDTH',
+ 'VIRTUAL-WIDTH-',
+ 'VIRTUAL-WIDTH-C',
+ 'VIRTUAL-WIDTH-CH',
+ 'VIRTUAL-WIDTH-CHA',
+ 'VIRTUAL-WIDTH-CHAR',
+ 'VIRTUAL-WIDTH-CHARS',
+ 'VIRTUAL-WIDTH-P',
+ 'VIRTUAL-WIDTH-PI',
+ 'VIRTUAL-WIDTH-PIX',
+ 'VIRTUAL-WIDTH-PIXE',
+ 'VIRTUAL-WIDTH-PIXEL',
+ 'VIRTUAL-WIDTH-PIXELS',
+ 'VISIBLE',
+ 'VOID',
+ 'WAIT',
+ 'WAIT-FOR',
+ 'WARNING',
+ 'WEB-CONTEXT',
+ 'WEEKDAY',
+ 'WHEN',
+ 'WHERE',
+ 'WHILE',
+ 'WIDGET',
+ 'WIDGET-E',
+ 'WIDGET-EN',
+ 'WIDGET-ENT',
+ 'WIDGET-ENTE',
+ 'WIDGET-ENTER',
+ 'WIDGET-ID',
+ 'WIDGET-L',
+ 'WIDGET-LE',
+ 'WIDGET-LEA',
+ 'WIDGET-LEAV',
+ 'WIDGET-LEAVE',
+ 'WIDGET-POOL',
+ 'WIDTH',
+ 'WIDTH-',
+ 'WIDTH-C',
+ 'WIDTH-CH',
+ 'WIDTH-CHA',
+ 'WIDTH-CHAR',
+ 'WIDTH-CHARS',
+ 'WIDTH-P',
+ 'WIDTH-PI',
+ 'WIDTH-PIX',
+ 'WIDTH-PIXE',
+ 'WIDTH-PIXEL',
+ 'WIDTH-PIXELS',
+ 'WINDOW',
+ 'WINDOW-MAXIM',
+ 'WINDOW-MAXIMI',
+ 'WINDOW-MAXIMIZ',
+ 'WINDOW-MAXIMIZE',
+ 'WINDOW-MAXIMIZED',
+ 'WINDOW-MINIM',
+ 'WINDOW-MINIMI',
+ 'WINDOW-MINIMIZ',
+ 'WINDOW-MINIMIZE',
+ 'WINDOW-MINIMIZED',
+ 'WINDOW-NAME',
+ 'WINDOW-NORMAL',
+ 'WINDOW-STA',
+ 'WINDOW-STAT',
+ 'WINDOW-STATE',
+ 'WINDOW-SYSTEM',
+ 'WITH',
+ 'WORD-INDEX',
+ 'WORD-WRAP',
+ 'WORK-AREA-HEIGHT-PIXELS',
+ 'WORK-AREA-WIDTH-PIXELS',
+ 'WORK-AREA-X',
+ 'WORK-AREA-Y',
+ 'WORK-TAB',
+ 'WORK-TABL',
+ 'WORK-TABLE',
+ 'WORKFILE',
+ 'WRITE',
+ 'WRITE-CDATA',
+ 'WRITE-CHARACTERS',
+ 'WRITE-COMMENT',
+ 'WRITE-DATA-ELEMENT',
+ 'WRITE-EMPTY-ELEMENT',
+ 'WRITE-ENTITY-REF',
+ 'WRITE-EXTERNAL-DTD',
+ 'WRITE-FRAGMENT',
+ 'WRITE-JSON',
+ 'WRITE-MESSAGE',
+ 'WRITE-PROCESSING-INSTRUCTION',
+ 'WRITE-STATUS',
+ 'WRITE-XML',
+ 'WRITE-XMLSCHEMA',
+ 'X',
+ 'X-OF',
+ 'XCODE',
+ 'XML-DATA-TYPE',
+ 'XML-ENTITY-EXPANSION-LIMIT',
+ 'XML-NODE-TYPE',
+ 'XML-SCHEMA-PATH',
+ 'XML-STRICT-ENTITY-RESOLUTION',
+ 'XML-SUPPRESS-NAMESPACE-PROCESSING',
+ 'XREF',
+ 'XREF-XML',
+ 'Y',
+ 'Y-OF',
+ 'YEAR',
+ 'YEAR-OFFSET',
+ 'YES',
+ 'YES-NO',
+ 'YES-NO-CANCEL'
+)
diff --git a/pygments/lexers/_php_builtins.py b/pygments/lexers/_php_builtins.py
new file mode 100644
index 0000000..a899f1d
--- /dev/null
+++ b/pygments/lexers/_php_builtins.py
@@ -0,0 +1,3325 @@
+"""
+ pygments.lexers._php_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file loads the function names and their modules from the
+ PHP website and generates itself.
+
+ Run with `python -I` to regenerate.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+MODULES = {'APCu': ('apcu_add',
+ 'apcu_cache_info',
+ 'apcu_cas',
+ 'apcu_clear_cache',
+ 'apcu_dec',
+ 'apcu_delete',
+ 'apcu_enabled',
+ 'apcu_entry',
+ 'apcu_exists',
+ 'apcu_fetch',
+ 'apcu_inc',
+ 'apcu_key_info',
+ 'apcu_sma_info',
+ 'apcu_store'),
+ 'Aliases and deprecated Mysqli': ('mysqli_connect',
+ 'mysqli_execute',
+ 'mysqli_get_client_stats',
+ 'mysqli_get_links_stats',
+ 'mysqli_report'),
+ 'Apache': ('apache_child_terminate',
+ 'apache_get_modules',
+ 'apache_get_version',
+ 'apache_getenv',
+ 'apache_lookup_uri',
+ 'apache_note',
+ 'apache_request_headers',
+ 'apache_response_headers',
+ 'apache_setenv',
+ 'getallheaders',
+ 'virtual'),
+ 'Array': ('array_change_key_case',
+ 'array_chunk',
+ 'array_column',
+ 'array_combine',
+ 'array_count_values',
+ 'array_diff_assoc',
+ 'array_diff_key',
+ 'array_diff_uassoc',
+ 'array_diff_ukey',
+ 'array_diff',
+ 'array_fill_keys',
+ 'array_fill',
+ 'array_filter',
+ 'array_flip',
+ 'array_intersect_assoc',
+ 'array_intersect_key',
+ 'array_intersect_uassoc',
+ 'array_intersect_ukey',
+ 'array_intersect',
+ 'array_is_list',
+ 'array_key_exists',
+ 'array_key_first',
+ 'array_key_last',
+ 'array_keys',
+ 'array_map',
+ 'array_merge_recursive',
+ 'array_merge',
+ 'array_multisort',
+ 'array_pad',
+ 'array_pop',
+ 'array_product',
+ 'array_push',
+ 'array_rand',
+ 'array_reduce',
+ 'array_replace_recursive',
+ 'array_replace',
+ 'array_reverse',
+ 'array_search',
+ 'array_shift',
+ 'array_slice',
+ 'array_splice',
+ 'array_sum',
+ 'array_udiff_assoc',
+ 'array_udiff_uassoc',
+ 'array_udiff',
+ 'array_uintersect_assoc',
+ 'array_uintersect_uassoc',
+ 'array_uintersect',
+ 'array_unique',
+ 'array_unshift',
+ 'array_values',
+ 'array_walk_recursive',
+ 'array_walk',
+ 'array',
+ 'arsort',
+ 'asort',
+ 'compact',
+ 'count',
+ 'current',
+ 'each',
+ 'end',
+ 'extract',
+ 'in_array',
+ 'key_exists',
+ 'key',
+ 'krsort',
+ 'ksort',
+ 'list',
+ 'natcasesort',
+ 'natsort',
+ 'next',
+ 'pos',
+ 'prev',
+ 'range',
+ 'reset',
+ 'rsort',
+ 'shuffle',
+ 'sizeof',
+ 'sort',
+ 'uasort',
+ 'uksort',
+ 'usort'),
+ 'BC Math': ('bcadd',
+ 'bccomp',
+ 'bcdiv',
+ 'bcmod',
+ 'bcmul',
+ 'bcpow',
+ 'bcpowmod',
+ 'bcscale',
+ 'bcsqrt',
+ 'bcsub'),
+ 'Bzip2': ('bzclose',
+ 'bzcompress',
+ 'bzdecompress',
+ 'bzerrno',
+ 'bzerror',
+ 'bzerrstr',
+ 'bzflush',
+ 'bzopen',
+ 'bzread',
+ 'bzwrite'),
+ 'COM': ('com_create_guid',
+ 'com_event_sink',
+ 'com_get_active_object',
+ 'com_load_typelib',
+ 'com_message_pump',
+ 'com_print_typeinfo',
+ 'variant_abs',
+ 'variant_add',
+ 'variant_and',
+ 'variant_cast',
+ 'variant_cat',
+ 'variant_cmp',
+ 'variant_date_from_timestamp',
+ 'variant_date_to_timestamp',
+ 'variant_div',
+ 'variant_eqv',
+ 'variant_fix',
+ 'variant_get_type',
+ 'variant_idiv',
+ 'variant_imp',
+ 'variant_int',
+ 'variant_mod',
+ 'variant_mul',
+ 'variant_neg',
+ 'variant_not',
+ 'variant_or',
+ 'variant_pow',
+ 'variant_round',
+ 'variant_set_type',
+ 'variant_set',
+ 'variant_sub',
+ 'variant_xor'),
+ 'CSPRNG': ('random_bytes', 'random_int'),
+ 'CUBRID': ('cubrid_bind',
+ 'cubrid_close_prepare',
+ 'cubrid_close_request',
+ 'cubrid_col_get',
+ 'cubrid_col_size',
+ 'cubrid_column_names',
+ 'cubrid_column_types',
+ 'cubrid_commit',
+ 'cubrid_connect_with_url',
+ 'cubrid_connect',
+ 'cubrid_current_oid',
+ 'cubrid_disconnect',
+ 'cubrid_drop',
+ 'cubrid_error_code_facility',
+ 'cubrid_error_code',
+ 'cubrid_error_msg',
+ 'cubrid_execute',
+ 'cubrid_fetch',
+ 'cubrid_free_result',
+ 'cubrid_get_autocommit',
+ 'cubrid_get_charset',
+ 'cubrid_get_class_name',
+ 'cubrid_get_client_info',
+ 'cubrid_get_db_parameter',
+ 'cubrid_get_query_timeout',
+ 'cubrid_get_server_info',
+ 'cubrid_get',
+ 'cubrid_insert_id',
+ 'cubrid_is_instance',
+ 'cubrid_lob_close',
+ 'cubrid_lob_export',
+ 'cubrid_lob_get',
+ 'cubrid_lob_send',
+ 'cubrid_lob_size',
+ 'cubrid_lob2_bind',
+ 'cubrid_lob2_close',
+ 'cubrid_lob2_export',
+ 'cubrid_lob2_import',
+ 'cubrid_lob2_new',
+ 'cubrid_lob2_read',
+ 'cubrid_lob2_seek64',
+ 'cubrid_lob2_seek',
+ 'cubrid_lob2_size64',
+ 'cubrid_lob2_size',
+ 'cubrid_lob2_tell64',
+ 'cubrid_lob2_tell',
+ 'cubrid_lob2_write',
+ 'cubrid_lock_read',
+ 'cubrid_lock_write',
+ 'cubrid_move_cursor',
+ 'cubrid_next_result',
+ 'cubrid_num_cols',
+ 'cubrid_num_rows',
+ 'cubrid_pconnect_with_url',
+ 'cubrid_pconnect',
+ 'cubrid_prepare',
+ 'cubrid_put',
+ 'cubrid_rollback',
+ 'cubrid_schema',
+ 'cubrid_seq_drop',
+ 'cubrid_seq_insert',
+ 'cubrid_seq_put',
+ 'cubrid_set_add',
+ 'cubrid_set_autocommit',
+ 'cubrid_set_db_parameter',
+ 'cubrid_set_drop',
+ 'cubrid_set_query_timeout',
+ 'cubrid_version'),
+ 'Calendar': ('cal_days_in_month',
+ 'cal_from_jd',
+ 'cal_info',
+ 'cal_to_jd',
+ 'easter_date',
+ 'easter_days',
+ 'frenchtojd',
+ 'gregoriantojd',
+ 'jddayofweek',
+ 'jdmonthname',
+ 'jdtofrench',
+ 'jdtogregorian',
+ 'jdtojewish',
+ 'jdtojulian',
+ 'jdtounix',
+ 'jewishtojd',
+ 'juliantojd',
+ 'unixtojd'),
+ 'Classes/Object': ('__autoload',
+ 'class_alias',
+ 'class_exists',
+ 'enum_exists',
+ 'get_called_class',
+ 'get_class_methods',
+ 'get_class_vars',
+ 'get_class',
+ 'get_declared_classes',
+ 'get_declared_interfaces',
+ 'get_declared_traits',
+ 'get_mangled_object_vars',
+ 'get_object_vars',
+ 'get_parent_class',
+ 'interface_exists',
+ 'is_a',
+ 'is_subclass_of',
+ 'method_exists',
+ 'property_exists',
+ 'trait_exists'),
+ 'Ctype': ('ctype_alnum',
+ 'ctype_alpha',
+ 'ctype_cntrl',
+ 'ctype_digit',
+ 'ctype_graph',
+ 'ctype_lower',
+ 'ctype_print',
+ 'ctype_punct',
+ 'ctype_space',
+ 'ctype_upper',
+ 'ctype_xdigit'),
+ 'DBA': ('dba_close',
+ 'dba_delete',
+ 'dba_exists',
+ 'dba_fetch',
+ 'dba_firstkey',
+ 'dba_handlers',
+ 'dba_insert',
+ 'dba_key_split',
+ 'dba_list',
+ 'dba_nextkey',
+ 'dba_open',
+ 'dba_optimize',
+ 'dba_popen',
+ 'dba_replace',
+ 'dba_sync'),
+ 'DOM': ('dom_import_simplexml',),
+ 'Date/Time': ('checkdate',
+ 'date_add',
+ 'date_create_from_format',
+ 'date_create_immutable_from_format',
+ 'date_create_immutable',
+ 'date_create',
+ 'date_date_set',
+ 'date_default_timezone_get',
+ 'date_default_timezone_set',
+ 'date_diff',
+ 'date_format',
+ 'date_get_last_errors',
+ 'date_interval_create_from_date_string',
+ 'date_interval_format',
+ 'date_isodate_set',
+ 'date_modify',
+ 'date_offset_get',
+ 'date_parse_from_format',
+ 'date_parse',
+ 'date_sub',
+ 'date_sun_info',
+ 'date_sunrise',
+ 'date_sunset',
+ 'date_time_set',
+ 'date_timestamp_get',
+ 'date_timestamp_set',
+ 'date_timezone_get',
+ 'date_timezone_set',
+ 'date',
+ 'getdate',
+ 'gettimeofday',
+ 'gmdate',
+ 'gmmktime',
+ 'gmstrftime',
+ 'idate',
+ 'localtime',
+ 'microtime',
+ 'mktime',
+ 'strftime',
+ 'strptime',
+ 'strtotime',
+ 'time',
+ 'timezone_abbreviations_list',
+ 'timezone_identifiers_list',
+ 'timezone_location_get',
+ 'timezone_name_from_abbr',
+ 'timezone_name_get',
+ 'timezone_offset_get',
+ 'timezone_open',
+ 'timezone_transitions_get',
+ 'timezone_version_get'),
+ 'Direct IO': ('dio_close',
+ 'dio_fcntl',
+ 'dio_open',
+ 'dio_read',
+ 'dio_seek',
+ 'dio_stat',
+ 'dio_tcsetattr',
+ 'dio_truncate',
+ 'dio_write'),
+ 'Directory': ('chdir',
+ 'chroot',
+ 'closedir',
+ 'dir',
+ 'getcwd',
+ 'opendir',
+ 'readdir',
+ 'rewinddir',
+ 'scandir'),
+ 'Eio': ('eio_busy',
+ 'eio_cancel',
+ 'eio_chmod',
+ 'eio_chown',
+ 'eio_close',
+ 'eio_custom',
+ 'eio_dup2',
+ 'eio_event_loop',
+ 'eio_fallocate',
+ 'eio_fchmod',
+ 'eio_fchown',
+ 'eio_fdatasync',
+ 'eio_fstat',
+ 'eio_fstatvfs',
+ 'eio_fsync',
+ 'eio_ftruncate',
+ 'eio_futime',
+ 'eio_get_event_stream',
+ 'eio_get_last_error',
+ 'eio_grp_add',
+ 'eio_grp_cancel',
+ 'eio_grp_limit',
+ 'eio_grp',
+ 'eio_init',
+ 'eio_link',
+ 'eio_lstat',
+ 'eio_mkdir',
+ 'eio_mknod',
+ 'eio_nop',
+ 'eio_npending',
+ 'eio_nready',
+ 'eio_nreqs',
+ 'eio_nthreads',
+ 'eio_open',
+ 'eio_poll',
+ 'eio_read',
+ 'eio_readahead',
+ 'eio_readdir',
+ 'eio_readlink',
+ 'eio_realpath',
+ 'eio_rename',
+ 'eio_rmdir',
+ 'eio_seek',
+ 'eio_sendfile',
+ 'eio_set_max_idle',
+ 'eio_set_max_parallel',
+ 'eio_set_max_poll_reqs',
+ 'eio_set_max_poll_time',
+ 'eio_set_min_parallel',
+ 'eio_stat',
+ 'eio_statvfs',
+ 'eio_symlink',
+ 'eio_sync_file_range',
+ 'eio_sync',
+ 'eio_syncfs',
+ 'eio_truncate',
+ 'eio_unlink',
+ 'eio_utime',
+ 'eio_write'),
+ 'Enchant': ('enchant_broker_describe',
+ 'enchant_broker_dict_exists',
+ 'enchant_broker_free_dict',
+ 'enchant_broker_free',
+ 'enchant_broker_get_dict_path',
+ 'enchant_broker_get_error',
+ 'enchant_broker_init',
+ 'enchant_broker_list_dicts',
+ 'enchant_broker_request_dict',
+ 'enchant_broker_request_pwl_dict',
+ 'enchant_broker_set_dict_path',
+ 'enchant_broker_set_ordering',
+ 'enchant_dict_add_to_personal',
+ 'enchant_dict_add_to_session',
+ 'enchant_dict_add',
+ 'enchant_dict_check',
+ 'enchant_dict_describe',
+ 'enchant_dict_get_error',
+ 'enchant_dict_is_added',
+ 'enchant_dict_is_in_session',
+ 'enchant_dict_quick_check',
+ 'enchant_dict_store_replacement',
+ 'enchant_dict_suggest'),
+ 'Error Handling': ('debug_backtrace',
+ 'debug_print_backtrace',
+ 'error_clear_last',
+ 'error_get_last',
+ 'error_log',
+ 'error_reporting',
+ 'restore_error_handler',
+ 'restore_exception_handler',
+ 'set_error_handler',
+ 'set_exception_handler',
+ 'trigger_error',
+ 'user_error'),
+ 'Exif': ('exif_imagetype',
+ 'exif_read_data',
+ 'exif_tagname',
+ 'exif_thumbnail',
+ 'read_exif_data'),
+ 'Expect': ('expect_expectl', 'expect_popen'),
+ 'FDF': ('fdf_add_doc_javascript',
+ 'fdf_add_template',
+ 'fdf_close',
+ 'fdf_create',
+ 'fdf_enum_values',
+ 'fdf_errno',
+ 'fdf_error',
+ 'fdf_get_ap',
+ 'fdf_get_attachment',
+ 'fdf_get_encoding',
+ 'fdf_get_file',
+ 'fdf_get_flags',
+ 'fdf_get_opt',
+ 'fdf_get_status',
+ 'fdf_get_value',
+ 'fdf_get_version',
+ 'fdf_header',
+ 'fdf_next_field_name',
+ 'fdf_open_string',
+ 'fdf_open',
+ 'fdf_remove_item',
+ 'fdf_save_string',
+ 'fdf_save',
+ 'fdf_set_ap',
+ 'fdf_set_encoding',
+ 'fdf_set_file',
+ 'fdf_set_flags',
+ 'fdf_set_javascript_action',
+ 'fdf_set_on_import_javascript',
+ 'fdf_set_opt',
+ 'fdf_set_status',
+ 'fdf_set_submit_form_action',
+ 'fdf_set_target_frame',
+ 'fdf_set_value',
+ 'fdf_set_version'),
+ 'FPM': ('fastcgi_finish_request',),
+ 'FTP': ('ftp_alloc',
+ 'ftp_append',
+ 'ftp_cdup',
+ 'ftp_chdir',
+ 'ftp_chmod',
+ 'ftp_close',
+ 'ftp_connect',
+ 'ftp_delete',
+ 'ftp_exec',
+ 'ftp_fget',
+ 'ftp_fput',
+ 'ftp_get_option',
+ 'ftp_get',
+ 'ftp_login',
+ 'ftp_mdtm',
+ 'ftp_mkdir',
+ 'ftp_mlsd',
+ 'ftp_nb_continue',
+ 'ftp_nb_fget',
+ 'ftp_nb_fput',
+ 'ftp_nb_get',
+ 'ftp_nb_put',
+ 'ftp_nlist',
+ 'ftp_pasv',
+ 'ftp_put',
+ 'ftp_pwd',
+ 'ftp_quit',
+ 'ftp_raw',
+ 'ftp_rawlist',
+ 'ftp_rename',
+ 'ftp_rmdir',
+ 'ftp_set_option',
+ 'ftp_site',
+ 'ftp_size',
+ 'ftp_ssl_connect',
+ 'ftp_systype'),
+ 'Fann': ('fann_cascadetrain_on_data',
+ 'fann_cascadetrain_on_file',
+ 'fann_clear_scaling_params',
+ 'fann_copy',
+ 'fann_create_from_file',
+ 'fann_create_shortcut_array',
+ 'fann_create_shortcut',
+ 'fann_create_sparse_array',
+ 'fann_create_sparse',
+ 'fann_create_standard_array',
+ 'fann_create_standard',
+ 'fann_create_train_from_callback',
+ 'fann_create_train',
+ 'fann_descale_input',
+ 'fann_descale_output',
+ 'fann_descale_train',
+ 'fann_destroy_train',
+ 'fann_destroy',
+ 'fann_duplicate_train_data',
+ 'fann_get_activation_function',
+ 'fann_get_activation_steepness',
+ 'fann_get_bias_array',
+ 'fann_get_bit_fail_limit',
+ 'fann_get_bit_fail',
+ 'fann_get_cascade_activation_functions_count',
+ 'fann_get_cascade_activation_functions',
+ 'fann_get_cascade_activation_steepnesses_count',
+ 'fann_get_cascade_activation_steepnesses',
+ 'fann_get_cascade_candidate_change_fraction',
+ 'fann_get_cascade_candidate_limit',
+ 'fann_get_cascade_candidate_stagnation_epochs',
+ 'fann_get_cascade_max_cand_epochs',
+ 'fann_get_cascade_max_out_epochs',
+ 'fann_get_cascade_min_cand_epochs',
+ 'fann_get_cascade_min_out_epochs',
+ 'fann_get_cascade_num_candidate_groups',
+ 'fann_get_cascade_num_candidates',
+ 'fann_get_cascade_output_change_fraction',
+ 'fann_get_cascade_output_stagnation_epochs',
+ 'fann_get_cascade_weight_multiplier',
+ 'fann_get_connection_array',
+ 'fann_get_connection_rate',
+ 'fann_get_errno',
+ 'fann_get_errstr',
+ 'fann_get_layer_array',
+ 'fann_get_learning_momentum',
+ 'fann_get_learning_rate',
+ 'fann_get_MSE',
+ 'fann_get_network_type',
+ 'fann_get_num_input',
+ 'fann_get_num_layers',
+ 'fann_get_num_output',
+ 'fann_get_quickprop_decay',
+ 'fann_get_quickprop_mu',
+ 'fann_get_rprop_decrease_factor',
+ 'fann_get_rprop_delta_max',
+ 'fann_get_rprop_delta_min',
+ 'fann_get_rprop_delta_zero',
+ 'fann_get_rprop_increase_factor',
+ 'fann_get_sarprop_step_error_shift',
+ 'fann_get_sarprop_step_error_threshold_factor',
+ 'fann_get_sarprop_temperature',
+ 'fann_get_sarprop_weight_decay_shift',
+ 'fann_get_total_connections',
+ 'fann_get_total_neurons',
+ 'fann_get_train_error_function',
+ 'fann_get_train_stop_function',
+ 'fann_get_training_algorithm',
+ 'fann_init_weights',
+ 'fann_length_train_data',
+ 'fann_merge_train_data',
+ 'fann_num_input_train_data',
+ 'fann_num_output_train_data',
+ 'fann_print_error',
+ 'fann_randomize_weights',
+ 'fann_read_train_from_file',
+ 'fann_reset_errno',
+ 'fann_reset_errstr',
+ 'fann_reset_MSE',
+ 'fann_run',
+ 'fann_save_train',
+ 'fann_save',
+ 'fann_scale_input_train_data',
+ 'fann_scale_input',
+ 'fann_scale_output_train_data',
+ 'fann_scale_output',
+ 'fann_scale_train_data',
+ 'fann_scale_train',
+ 'fann_set_activation_function_hidden',
+ 'fann_set_activation_function_layer',
+ 'fann_set_activation_function_output',
+ 'fann_set_activation_function',
+ 'fann_set_activation_steepness_hidden',
+ 'fann_set_activation_steepness_layer',
+ 'fann_set_activation_steepness_output',
+ 'fann_set_activation_steepness',
+ 'fann_set_bit_fail_limit',
+ 'fann_set_callback',
+ 'fann_set_cascade_activation_functions',
+ 'fann_set_cascade_activation_steepnesses',
+ 'fann_set_cascade_candidate_change_fraction',
+ 'fann_set_cascade_candidate_limit',
+ 'fann_set_cascade_candidate_stagnation_epochs',
+ 'fann_set_cascade_max_cand_epochs',
+ 'fann_set_cascade_max_out_epochs',
+ 'fann_set_cascade_min_cand_epochs',
+ 'fann_set_cascade_min_out_epochs',
+ 'fann_set_cascade_num_candidate_groups',
+ 'fann_set_cascade_output_change_fraction',
+ 'fann_set_cascade_output_stagnation_epochs',
+ 'fann_set_cascade_weight_multiplier',
+ 'fann_set_error_log',
+ 'fann_set_input_scaling_params',
+ 'fann_set_learning_momentum',
+ 'fann_set_learning_rate',
+ 'fann_set_output_scaling_params',
+ 'fann_set_quickprop_decay',
+ 'fann_set_quickprop_mu',
+ 'fann_set_rprop_decrease_factor',
+ 'fann_set_rprop_delta_max',
+ 'fann_set_rprop_delta_min',
+ 'fann_set_rprop_delta_zero',
+ 'fann_set_rprop_increase_factor',
+ 'fann_set_sarprop_step_error_shift',
+ 'fann_set_sarprop_step_error_threshold_factor',
+ 'fann_set_sarprop_temperature',
+ 'fann_set_sarprop_weight_decay_shift',
+ 'fann_set_scaling_params',
+ 'fann_set_train_error_function',
+ 'fann_set_train_stop_function',
+ 'fann_set_training_algorithm',
+ 'fann_set_weight_array',
+ 'fann_set_weight',
+ 'fann_shuffle_train_data',
+ 'fann_subset_train_data',
+ 'fann_test_data',
+ 'fann_test',
+ 'fann_train_epoch',
+ 'fann_train_on_data',
+ 'fann_train_on_file',
+ 'fann_train'),
+ 'Fileinfo': ('finfo_buffer',
+ 'finfo_close',
+ 'finfo_file',
+ 'finfo_open',
+ 'finfo_set_flags',
+ 'mime_content_type'),
+ 'Filesystem': ('basename',
+ 'chgrp',
+ 'chmod',
+ 'chown',
+ 'clearstatcache',
+ 'copy',
+ 'dirname',
+ 'disk_free_space',
+ 'disk_total_space',
+ 'diskfreespace',
+ 'fclose',
+ 'fdatasync',
+ 'feof',
+ 'fflush',
+ 'fgetc',
+ 'fgetcsv',
+ 'fgets',
+ 'fgetss',
+ 'file_exists',
+ 'file_get_contents',
+ 'file_put_contents',
+ 'file',
+ 'fileatime',
+ 'filectime',
+ 'filegroup',
+ 'fileinode',
+ 'filemtime',
+ 'fileowner',
+ 'fileperms',
+ 'filesize',
+ 'filetype',
+ 'flock',
+ 'fnmatch',
+ 'fopen',
+ 'fpassthru',
+ 'fputcsv',
+ 'fputs',
+ 'fread',
+ 'fscanf',
+ 'fseek',
+ 'fstat',
+ 'fsync',
+ 'ftell',
+ 'ftruncate',
+ 'fwrite',
+ 'glob',
+ 'is_dir',
+ 'is_executable',
+ 'is_file',
+ 'is_link',
+ 'is_readable',
+ 'is_uploaded_file',
+ 'is_writable',
+ 'is_writeable',
+ 'lchgrp',
+ 'lchown',
+ 'link',
+ 'linkinfo',
+ 'lstat',
+ 'mkdir',
+ 'move_uploaded_file',
+ 'parse_ini_file',
+ 'parse_ini_string',
+ 'pathinfo',
+ 'pclose',
+ 'popen',
+ 'readfile',
+ 'readlink',
+ 'realpath_cache_get',
+ 'realpath_cache_size',
+ 'realpath',
+ 'rename',
+ 'rewind',
+ 'rmdir',
+ 'set_file_buffer',
+ 'stat',
+ 'symlink',
+ 'tempnam',
+ 'tmpfile',
+ 'touch',
+ 'umask',
+ 'unlink'),
+ 'Filter': ('filter_has_var',
+ 'filter_id',
+ 'filter_input_array',
+ 'filter_input',
+ 'filter_list',
+ 'filter_var_array',
+ 'filter_var'),
+ 'Firebird/InterBase': ('fbird_add_user',
+ 'fbird_affected_rows',
+ 'fbird_backup',
+ 'fbird_blob_add',
+ 'fbird_blob_cancel',
+ 'fbird_blob_close',
+ 'fbird_blob_create',
+ 'fbird_blob_echo',
+ 'fbird_blob_get',
+ 'fbird_blob_import',
+ 'fbird_blob_info',
+ 'fbird_blob_open',
+ 'fbird_close',
+ 'fbird_commit_ret',
+ 'fbird_commit',
+ 'fbird_connect',
+ 'fbird_db_info',
+ 'fbird_delete_user',
+ 'fbird_drop_db',
+ 'fbird_errcode',
+ 'fbird_errmsg',
+ 'fbird_execute',
+ 'fbird_fetch_assoc',
+ 'fbird_fetch_object',
+ 'fbird_fetch_row',
+ 'fbird_field_info',
+ 'fbird_free_event_handler',
+ 'fbird_free_query',
+ 'fbird_free_result',
+ 'fbird_gen_id',
+ 'fbird_maintain_db',
+ 'fbird_modify_user',
+ 'fbird_name_result',
+ 'fbird_num_fields',
+ 'fbird_num_params',
+ 'fbird_param_info',
+ 'fbird_pconnect',
+ 'fbird_prepare',
+ 'fbird_query',
+ 'fbird_restore',
+ 'fbird_rollback_ret',
+ 'fbird_rollback',
+ 'fbird_server_info',
+ 'fbird_service_attach',
+ 'fbird_service_detach',
+ 'fbird_set_event_handler',
+ 'fbird_trans',
+ 'fbird_wait_event',
+ 'ibase_add_user',
+ 'ibase_affected_rows',
+ 'ibase_backup',
+ 'ibase_blob_add',
+ 'ibase_blob_cancel',
+ 'ibase_blob_close',
+ 'ibase_blob_create',
+ 'ibase_blob_echo',
+ 'ibase_blob_get',
+ 'ibase_blob_import',
+ 'ibase_blob_info',
+ 'ibase_blob_open',
+ 'ibase_close',
+ 'ibase_commit_ret',
+ 'ibase_commit',
+ 'ibase_connect',
+ 'ibase_db_info',
+ 'ibase_delete_user',
+ 'ibase_drop_db',
+ 'ibase_errcode',
+ 'ibase_errmsg',
+ 'ibase_execute',
+ 'ibase_fetch_assoc',
+ 'ibase_fetch_object',
+ 'ibase_fetch_row',
+ 'ibase_field_info',
+ 'ibase_free_event_handler',
+ 'ibase_free_query',
+ 'ibase_free_result',
+ 'ibase_gen_id',
+ 'ibase_maintain_db',
+ 'ibase_modify_user',
+ 'ibase_name_result',
+ 'ibase_num_fields',
+ 'ibase_num_params',
+ 'ibase_param_info',
+ 'ibase_pconnect',
+ 'ibase_prepare',
+ 'ibase_query',
+ 'ibase_restore',
+ 'ibase_rollback_ret',
+ 'ibase_rollback',
+ 'ibase_server_info',
+ 'ibase_service_attach',
+ 'ibase_service_detach',
+ 'ibase_set_event_handler',
+ 'ibase_trans',
+ 'ibase_wait_event'),
+ 'Function handling': ('call_user_func_array',
+ 'call_user_func',
+ 'create_function',
+ 'forward_static_call_array',
+ 'forward_static_call',
+ 'func_get_arg',
+ 'func_get_args',
+ 'func_num_args',
+ 'function_exists',
+ 'get_defined_functions',
+ 'register_shutdown_function',
+ 'register_tick_function',
+ 'unregister_tick_function'),
+ 'GD and Image': ('gd_info',
+ 'getimagesize',
+ 'getimagesizefromstring',
+ 'image_type_to_extension',
+ 'image_type_to_mime_type',
+ 'image2wbmp',
+ 'imageaffine',
+ 'imageaffinematrixconcat',
+ 'imageaffinematrixget',
+ 'imagealphablending',
+ 'imageantialias',
+ 'imagearc',
+ 'imageavif',
+ 'imagebmp',
+ 'imagechar',
+ 'imagecharup',
+ 'imagecolorallocate',
+ 'imagecolorallocatealpha',
+ 'imagecolorat',
+ 'imagecolorclosest',
+ 'imagecolorclosestalpha',
+ 'imagecolorclosesthwb',
+ 'imagecolordeallocate',
+ 'imagecolorexact',
+ 'imagecolorexactalpha',
+ 'imagecolormatch',
+ 'imagecolorresolve',
+ 'imagecolorresolvealpha',
+ 'imagecolorset',
+ 'imagecolorsforindex',
+ 'imagecolorstotal',
+ 'imagecolortransparent',
+ 'imageconvolution',
+ 'imagecopy',
+ 'imagecopymerge',
+ 'imagecopymergegray',
+ 'imagecopyresampled',
+ 'imagecopyresized',
+ 'imagecreate',
+ 'imagecreatefromavif',
+ 'imagecreatefrombmp',
+ 'imagecreatefromgd2',
+ 'imagecreatefromgd2part',
+ 'imagecreatefromgd',
+ 'imagecreatefromgif',
+ 'imagecreatefromjpeg',
+ 'imagecreatefrompng',
+ 'imagecreatefromstring',
+ 'imagecreatefromtga',
+ 'imagecreatefromwbmp',
+ 'imagecreatefromwebp',
+ 'imagecreatefromxbm',
+ 'imagecreatefromxpm',
+ 'imagecreatetruecolor',
+ 'imagecrop',
+ 'imagecropauto',
+ 'imagedashedline',
+ 'imagedestroy',
+ 'imageellipse',
+ 'imagefill',
+ 'imagefilledarc',
+ 'imagefilledellipse',
+ 'imagefilledpolygon',
+ 'imagefilledrectangle',
+ 'imagefilltoborder',
+ 'imagefilter',
+ 'imageflip',
+ 'imagefontheight',
+ 'imagefontwidth',
+ 'imageftbbox',
+ 'imagefttext',
+ 'imagegammacorrect',
+ 'imagegd2',
+ 'imagegd',
+ 'imagegetclip',
+ 'imagegetinterpolation',
+ 'imagegif',
+ 'imagegrabscreen',
+ 'imagegrabwindow',
+ 'imageinterlace',
+ 'imageistruecolor',
+ 'imagejpeg',
+ 'imagelayereffect',
+ 'imageline',
+ 'imageloadfont',
+ 'imageopenpolygon',
+ 'imagepalettecopy',
+ 'imagepalettetotruecolor',
+ 'imagepng',
+ 'imagepolygon',
+ 'imagerectangle',
+ 'imageresolution',
+ 'imagerotate',
+ 'imagesavealpha',
+ 'imagescale',
+ 'imagesetbrush',
+ 'imagesetclip',
+ 'imagesetinterpolation',
+ 'imagesetpixel',
+ 'imagesetstyle',
+ 'imagesetthickness',
+ 'imagesettile',
+ 'imagestring',
+ 'imagestringup',
+ 'imagesx',
+ 'imagesy',
+ 'imagetruecolortopalette',
+ 'imagettfbbox',
+ 'imagettftext',
+ 'imagetypes',
+ 'imagewbmp',
+ 'imagewebp',
+ 'imagexbm',
+ 'iptcembed',
+ 'iptcparse',
+ 'jpeg2wbmp',
+ 'png2wbmp'),
+ 'GMP': ('gmp_abs',
+ 'gmp_add',
+ 'gmp_and',
+ 'gmp_binomial',
+ 'gmp_clrbit',
+ 'gmp_cmp',
+ 'gmp_com',
+ 'gmp_div_q',
+ 'gmp_div_qr',
+ 'gmp_div_r',
+ 'gmp_div',
+ 'gmp_divexact',
+ 'gmp_export',
+ 'gmp_fact',
+ 'gmp_gcd',
+ 'gmp_gcdext',
+ 'gmp_hamdist',
+ 'gmp_import',
+ 'gmp_init',
+ 'gmp_intval',
+ 'gmp_invert',
+ 'gmp_jacobi',
+ 'gmp_kronecker',
+ 'gmp_lcm',
+ 'gmp_legendre',
+ 'gmp_mod',
+ 'gmp_mul',
+ 'gmp_neg',
+ 'gmp_nextprime',
+ 'gmp_or',
+ 'gmp_perfect_power',
+ 'gmp_perfect_square',
+ 'gmp_popcount',
+ 'gmp_pow',
+ 'gmp_powm',
+ 'gmp_prob_prime',
+ 'gmp_random_bits',
+ 'gmp_random_range',
+ 'gmp_random_seed',
+ 'gmp_random',
+ 'gmp_root',
+ 'gmp_rootrem',
+ 'gmp_scan0',
+ 'gmp_scan1',
+ 'gmp_setbit',
+ 'gmp_sign',
+ 'gmp_sqrt',
+ 'gmp_sqrtrem',
+ 'gmp_strval',
+ 'gmp_sub',
+ 'gmp_testbit',
+ 'gmp_xor'),
+ 'GeoIP': ('geoip_asnum_by_name',
+ 'geoip_continent_code_by_name',
+ 'geoip_country_code_by_name',
+ 'geoip_country_code3_by_name',
+ 'geoip_country_name_by_name',
+ 'geoip_database_info',
+ 'geoip_db_avail',
+ 'geoip_db_filename',
+ 'geoip_db_get_all_info',
+ 'geoip_domain_by_name',
+ 'geoip_id_by_name',
+ 'geoip_isp_by_name',
+ 'geoip_netspeedcell_by_name',
+ 'geoip_org_by_name',
+ 'geoip_record_by_name',
+ 'geoip_region_by_name',
+ 'geoip_region_name_by_code',
+ 'geoip_setup_custom_directory',
+ 'geoip_time_zone_by_country_and_region'),
+ 'Gettext': ('bind_textdomain_codeset',
+ 'bindtextdomain',
+ 'dcgettext',
+ 'dcngettext',
+ 'dgettext',
+ 'dngettext',
+ 'gettext',
+ 'ngettext',
+ 'textdomain'),
+ 'GnuPG': ('gnupg_adddecryptkey',
+ 'gnupg_addencryptkey',
+ 'gnupg_addsignkey',
+ 'gnupg_cleardecryptkeys',
+ 'gnupg_clearencryptkeys',
+ 'gnupg_clearsignkeys',
+ 'gnupg_decrypt',
+ 'gnupg_decryptverify',
+ 'gnupg_encrypt',
+ 'gnupg_encryptsign',
+ 'gnupg_export',
+ 'gnupg_getengineinfo',
+ 'gnupg_geterror',
+ 'gnupg_geterrorinfo',
+ 'gnupg_getprotocol',
+ 'gnupg_import',
+ 'gnupg_init',
+ 'gnupg_keyinfo',
+ 'gnupg_setarmor',
+ 'gnupg_seterrormode',
+ 'gnupg_setsignmode',
+ 'gnupg_sign',
+ 'gnupg_verify'),
+ 'Grapheme': ('grapheme_extract',
+ 'grapheme_stripos',
+ 'grapheme_stristr',
+ 'grapheme_strlen',
+ 'grapheme_strpos',
+ 'grapheme_strripos',
+ 'grapheme_strrpos',
+ 'grapheme_strstr',
+ 'grapheme_substr'),
+ 'Hash': ('hash_algos',
+ 'hash_copy',
+ 'hash_equals',
+ 'hash_file',
+ 'hash_final',
+ 'hash_hkdf',
+ 'hash_hmac_algos',
+ 'hash_hmac_file',
+ 'hash_hmac',
+ 'hash_init',
+ 'hash_pbkdf2',
+ 'hash_update_file',
+ 'hash_update_stream',
+ 'hash_update',
+ 'hash'),
+ 'IBM DB2': ('db2_autocommit',
+ 'db2_bind_param',
+ 'db2_client_info',
+ 'db2_close',
+ 'db2_column_privileges',
+ 'db2_columns',
+ 'db2_commit',
+ 'db2_conn_error',
+ 'db2_conn_errormsg',
+ 'db2_connect',
+ 'db2_cursor_type',
+ 'db2_escape_string',
+ 'db2_exec',
+ 'db2_execute',
+ 'db2_fetch_array',
+ 'db2_fetch_assoc',
+ 'db2_fetch_both',
+ 'db2_fetch_object',
+ 'db2_fetch_row',
+ 'db2_field_display_size',
+ 'db2_field_name',
+ 'db2_field_num',
+ 'db2_field_precision',
+ 'db2_field_scale',
+ 'db2_field_type',
+ 'db2_field_width',
+ 'db2_foreign_keys',
+ 'db2_free_result',
+ 'db2_free_stmt',
+ 'db2_get_option',
+ 'db2_last_insert_id',
+ 'db2_lob_read',
+ 'db2_next_result',
+ 'db2_num_fields',
+ 'db2_num_rows',
+ 'db2_pclose',
+ 'db2_pconnect',
+ 'db2_prepare',
+ 'db2_primary_keys',
+ 'db2_procedure_columns',
+ 'db2_procedures',
+ 'db2_result',
+ 'db2_rollback',
+ 'db2_server_info',
+ 'db2_set_option',
+ 'db2_special_columns',
+ 'db2_statistics',
+ 'db2_stmt_error',
+ 'db2_stmt_errormsg',
+ 'db2_table_privileges',
+ 'db2_tables'),
+ 'IDN': ('idn_to_ascii', 'idn_to_utf8'),
+ 'IMAP': ('imap_8bit',
+ 'imap_alerts',
+ 'imap_append',
+ 'imap_base64',
+ 'imap_binary',
+ 'imap_body',
+ 'imap_bodystruct',
+ 'imap_check',
+ 'imap_clearflag_full',
+ 'imap_close',
+ 'imap_create',
+ 'imap_createmailbox',
+ 'imap_delete',
+ 'imap_deletemailbox',
+ 'imap_errors',
+ 'imap_expunge',
+ 'imap_fetch_overview',
+ 'imap_fetchbody',
+ 'imap_fetchheader',
+ 'imap_fetchmime',
+ 'imap_fetchstructure',
+ 'imap_fetchtext',
+ 'imap_gc',
+ 'imap_get_quota',
+ 'imap_get_quotaroot',
+ 'imap_getacl',
+ 'imap_getmailboxes',
+ 'imap_getsubscribed',
+ 'imap_header',
+ 'imap_headerinfo',
+ 'imap_headers',
+ 'imap_last_error',
+ 'imap_list',
+ 'imap_listmailbox',
+ 'imap_listscan',
+ 'imap_listsubscribed',
+ 'imap_lsub',
+ 'imap_mail_compose',
+ 'imap_mail_copy',
+ 'imap_mail_move',
+ 'imap_mail',
+ 'imap_mailboxmsginfo',
+ 'imap_mime_header_decode',
+ 'imap_msgno',
+ 'imap_mutf7_to_utf8',
+ 'imap_num_msg',
+ 'imap_num_recent',
+ 'imap_open',
+ 'imap_ping',
+ 'imap_qprint',
+ 'imap_rename',
+ 'imap_renamemailbox',
+ 'imap_reopen',
+ 'imap_rfc822_parse_adrlist',
+ 'imap_rfc822_parse_headers',
+ 'imap_rfc822_write_address',
+ 'imap_savebody',
+ 'imap_scan',
+ 'imap_scanmailbox',
+ 'imap_search',
+ 'imap_set_quota',
+ 'imap_setacl',
+ 'imap_setflag_full',
+ 'imap_sort',
+ 'imap_status',
+ 'imap_subscribe',
+ 'imap_thread',
+ 'imap_timeout',
+ 'imap_uid',
+ 'imap_undelete',
+ 'imap_unsubscribe',
+ 'imap_utf7_decode',
+ 'imap_utf7_encode',
+ 'imap_utf8_to_mutf7',
+ 'imap_utf8'),
+ 'Igbinary': ('igbinary_serialize', 'igbinary_unserialize'),
+ 'Inotify': ('inotify_add_watch',
+ 'inotify_init',
+ 'inotify_queue_len',
+ 'inotify_read',
+ 'inotify_rm_watch'),
+ 'JSON': ('json_decode',
+ 'json_encode',
+ 'json_last_error_msg',
+ 'json_last_error'),
+ 'LDAP': ('ldap_8859_to_t61',
+ 'ldap_add_ext',
+ 'ldap_add',
+ 'ldap_bind_ext',
+ 'ldap_bind',
+ 'ldap_close',
+ 'ldap_compare',
+ 'ldap_connect',
+ 'ldap_control_paged_result_response',
+ 'ldap_control_paged_result',
+ 'ldap_count_entries',
+ 'ldap_count_references',
+ 'ldap_delete_ext',
+ 'ldap_delete',
+ 'ldap_dn2ufn',
+ 'ldap_err2str',
+ 'ldap_errno',
+ 'ldap_error',
+ 'ldap_escape',
+ 'ldap_exop_passwd',
+ 'ldap_exop_refresh',
+ 'ldap_exop_whoami',
+ 'ldap_exop',
+ 'ldap_explode_dn',
+ 'ldap_first_attribute',
+ 'ldap_first_entry',
+ 'ldap_first_reference',
+ 'ldap_free_result',
+ 'ldap_get_attributes',
+ 'ldap_get_dn',
+ 'ldap_get_entries',
+ 'ldap_get_option',
+ 'ldap_get_values_len',
+ 'ldap_get_values',
+ 'ldap_list',
+ 'ldap_mod_add_ext',
+ 'ldap_mod_add',
+ 'ldap_mod_del_ext',
+ 'ldap_mod_del',
+ 'ldap_mod_replace_ext',
+ 'ldap_mod_replace',
+ 'ldap_modify_batch',
+ 'ldap_modify',
+ 'ldap_next_attribute',
+ 'ldap_next_entry',
+ 'ldap_next_reference',
+ 'ldap_parse_exop',
+ 'ldap_parse_reference',
+ 'ldap_parse_result',
+ 'ldap_read',
+ 'ldap_rename_ext',
+ 'ldap_rename',
+ 'ldap_sasl_bind',
+ 'ldap_search',
+ 'ldap_set_option',
+ 'ldap_set_rebind_proc',
+ 'ldap_sort',
+ 'ldap_start_tls',
+ 'ldap_t61_to_8859',
+ 'ldap_unbind'),
+ 'LZF': ('lzf_compress', 'lzf_decompress', 'lzf_optimized_for'),
+ 'Mail': ('ezmlm_hash', 'mail'),
+ 'Mailparse': ('mailparse_determine_best_xfer_encoding',
+ 'mailparse_msg_create',
+ 'mailparse_msg_extract_part_file',
+ 'mailparse_msg_extract_part',
+ 'mailparse_msg_extract_whole_part_file',
+ 'mailparse_msg_free',
+ 'mailparse_msg_get_part_data',
+ 'mailparse_msg_get_part',
+ 'mailparse_msg_get_structure',
+ 'mailparse_msg_parse_file',
+ 'mailparse_msg_parse',
+ 'mailparse_rfc822_parse_addresses',
+ 'mailparse_stream_encode',
+ 'mailparse_uudecode_all'),
+ 'Math': ('abs',
+ 'acos',
+ 'acosh',
+ 'asin',
+ 'asinh',
+ 'atan2',
+ 'atan',
+ 'atanh',
+ 'base_convert',
+ 'bindec',
+ 'ceil',
+ 'cos',
+ 'cosh',
+ 'decbin',
+ 'dechex',
+ 'decoct',
+ 'deg2rad',
+ 'exp',
+ 'expm1',
+ 'fdiv',
+ 'floor',
+ 'fmod',
+ 'getrandmax',
+ 'hexdec',
+ 'hypot',
+ 'intdiv',
+ 'is_finite',
+ 'is_infinite',
+ 'is_nan',
+ 'lcg_value',
+ 'log10',
+ 'log1p',
+ 'log',
+ 'max',
+ 'min',
+ 'mt_getrandmax',
+ 'mt_rand',
+ 'mt_srand',
+ 'octdec',
+ 'pi',
+ 'pow',
+ 'rad2deg',
+ 'rand',
+ 'round',
+ 'sin',
+ 'sinh',
+ 'sqrt',
+ 'srand',
+ 'tan',
+ 'tanh'),
+ 'Mcrypt': ('mcrypt_create_iv',
+ 'mcrypt_decrypt',
+ 'mcrypt_enc_get_algorithms_name',
+ 'mcrypt_enc_get_block_size',
+ 'mcrypt_enc_get_iv_size',
+ 'mcrypt_enc_get_key_size',
+ 'mcrypt_enc_get_modes_name',
+ 'mcrypt_enc_get_supported_key_sizes',
+ 'mcrypt_enc_is_block_algorithm_mode',
+ 'mcrypt_enc_is_block_algorithm',
+ 'mcrypt_enc_is_block_mode',
+ 'mcrypt_enc_self_test',
+ 'mcrypt_encrypt',
+ 'mcrypt_generic_deinit',
+ 'mcrypt_generic_init',
+ 'mcrypt_generic',
+ 'mcrypt_get_block_size',
+ 'mcrypt_get_cipher_name',
+ 'mcrypt_get_iv_size',
+ 'mcrypt_get_key_size',
+ 'mcrypt_list_algorithms',
+ 'mcrypt_list_modes',
+ 'mcrypt_module_close',
+ 'mcrypt_module_get_algo_block_size',
+ 'mcrypt_module_get_algo_key_size',
+ 'mcrypt_module_get_supported_key_sizes',
+ 'mcrypt_module_is_block_algorithm_mode',
+ 'mcrypt_module_is_block_algorithm',
+ 'mcrypt_module_is_block_mode',
+ 'mcrypt_module_open',
+ 'mcrypt_module_self_test',
+ 'mdecrypt_generic'),
+ 'Memcache': ('memcache_debug',),
+ 'Mhash': ('mhash_count',
+ 'mhash_get_block_size',
+ 'mhash_get_hash_name',
+ 'mhash_keygen_s2k',
+ 'mhash'),
+ 'Misc.': ('connection_aborted',
+ 'connection_status',
+ 'constant',
+ 'define',
+ 'defined',
+ 'die',
+ 'eval',
+ 'exit',
+ 'get_browser',
+ '__halt_compiler',
+ 'highlight_file',
+ 'highlight_string',
+ 'hrtime',
+ 'ignore_user_abort',
+ 'pack',
+ 'php_strip_whitespace',
+ 'sapi_windows_cp_conv',
+ 'sapi_windows_cp_get',
+ 'sapi_windows_cp_is_utf8',
+ 'sapi_windows_cp_set',
+ 'sapi_windows_generate_ctrl_event',
+ 'sapi_windows_set_ctrl_handler',
+ 'sapi_windows_vt100_support',
+ 'show_source',
+ 'sleep',
+ 'sys_getloadavg',
+ 'time_nanosleep',
+ 'time_sleep_until',
+ 'uniqid',
+ 'unpack',
+ 'usleep'),
+ 'Multibyte String': ('mb_check_encoding',
+ 'mb_chr',
+ 'mb_convert_case',
+ 'mb_convert_encoding',
+ 'mb_convert_kana',
+ 'mb_convert_variables',
+ 'mb_decode_mimeheader',
+ 'mb_decode_numericentity',
+ 'mb_detect_encoding',
+ 'mb_detect_order',
+ 'mb_encode_mimeheader',
+ 'mb_encode_numericentity',
+ 'mb_encoding_aliases',
+ 'mb_ereg_match',
+ 'mb_ereg_replace_callback',
+ 'mb_ereg_replace',
+ 'mb_ereg_search_getpos',
+ 'mb_ereg_search_getregs',
+ 'mb_ereg_search_init',
+ 'mb_ereg_search_pos',
+ 'mb_ereg_search_regs',
+ 'mb_ereg_search_setpos',
+ 'mb_ereg_search',
+ 'mb_ereg',
+ 'mb_eregi_replace',
+ 'mb_eregi',
+ 'mb_get_info',
+ 'mb_http_input',
+ 'mb_http_output',
+ 'mb_internal_encoding',
+ 'mb_language',
+ 'mb_list_encodings',
+ 'mb_ord',
+ 'mb_output_handler',
+ 'mb_parse_str',
+ 'mb_preferred_mime_name',
+ 'mb_regex_encoding',
+ 'mb_regex_set_options',
+ 'mb_scrub',
+ 'mb_send_mail',
+ 'mb_split',
+ 'mb_str_split',
+ 'mb_strcut',
+ 'mb_strimwidth',
+ 'mb_stripos',
+ 'mb_stristr',
+ 'mb_strlen',
+ 'mb_strpos',
+ 'mb_strrchr',
+ 'mb_strrichr',
+ 'mb_strripos',
+ 'mb_strrpos',
+ 'mb_strstr',
+ 'mb_strtolower',
+ 'mb_strtoupper',
+ 'mb_strwidth',
+ 'mb_substitute_character',
+ 'mb_substr_count',
+ 'mb_substr'),
+ 'MySQL': ('mysql_affected_rows',
+ 'mysql_client_encoding',
+ 'mysql_close',
+ 'mysql_connect',
+ 'mysql_create_db',
+ 'mysql_data_seek',
+ 'mysql_db_name',
+ 'mysql_db_query',
+ 'mysql_drop_db',
+ 'mysql_errno',
+ 'mysql_error',
+ 'mysql_escape_string',
+ 'mysql_fetch_array',
+ 'mysql_fetch_assoc',
+ 'mysql_fetch_field',
+ 'mysql_fetch_lengths',
+ 'mysql_fetch_object',
+ 'mysql_fetch_row',
+ 'mysql_field_flags',
+ 'mysql_field_len',
+ 'mysql_field_name',
+ 'mysql_field_seek',
+ 'mysql_field_table',
+ 'mysql_field_type',
+ 'mysql_free_result',
+ 'mysql_get_client_info',
+ 'mysql_get_host_info',
+ 'mysql_get_proto_info',
+ 'mysql_get_server_info',
+ 'mysql_info',
+ 'mysql_insert_id',
+ 'mysql_list_dbs',
+ 'mysql_list_fields',
+ 'mysql_list_processes',
+ 'mysql_list_tables',
+ 'mysql_num_fields',
+ 'mysql_num_rows',
+ 'mysql_pconnect',
+ 'mysql_ping',
+ 'mysql_query',
+ 'mysql_real_escape_string',
+ 'mysql_result',
+ 'mysql_select_db',
+ 'mysql_set_charset',
+ 'mysql_stat',
+ 'mysql_tablename',
+ 'mysql_thread_id',
+ 'mysql_unbuffered_query'),
+ 'Mysql_xdevapi': ('expression', 'getSession'),
+ 'Network': ('checkdnsrr',
+ 'closelog',
+ 'dns_check_record',
+ 'dns_get_mx',
+ 'dns_get_record',
+ 'fsockopen',
+ 'gethostbyaddr',
+ 'gethostbyname',
+ 'gethostbynamel',
+ 'gethostname',
+ 'getmxrr',
+ 'getprotobyname',
+ 'getprotobynumber',
+ 'getservbyname',
+ 'getservbyport',
+ 'header_register_callback',
+ 'header_remove',
+ 'header',
+ 'headers_list',
+ 'headers_sent',
+ 'http_response_code',
+ 'inet_ntop',
+ 'inet_pton',
+ 'ip2long',
+ 'long2ip',
+ 'net_get_interfaces',
+ 'openlog',
+ 'pfsockopen',
+ 'setcookie',
+ 'setrawcookie',
+ 'socket_get_status',
+ 'socket_set_blocking',
+ 'socket_set_timeout',
+ 'syslog'),
+ 'OAuth': ('oauth_get_sbs', 'oauth_urlencode'),
+ 'OCI8': ('oci_bind_array_by_name',
+ 'oci_bind_by_name',
+ 'oci_cancel',
+ 'oci_client_version',
+ 'oci_close',
+ 'oci_commit',
+ 'oci_connect',
+ 'oci_define_by_name',
+ 'oci_error',
+ 'oci_execute',
+ 'oci_fetch_all',
+ 'oci_fetch_array',
+ 'oci_fetch_assoc',
+ 'oci_fetch_object',
+ 'oci_fetch_row',
+ 'oci_fetch',
+ 'oci_field_is_null',
+ 'oci_field_name',
+ 'oci_field_precision',
+ 'oci_field_scale',
+ 'oci_field_size',
+ 'oci_field_type_raw',
+ 'oci_field_type',
+ 'oci_free_descriptor',
+ 'oci_free_statement',
+ 'oci_get_implicit_resultset',
+ 'oci_lob_copy',
+ 'oci_lob_is_equal',
+ 'oci_new_collection',
+ 'oci_new_connect',
+ 'oci_new_cursor',
+ 'oci_new_descriptor',
+ 'oci_num_fields',
+ 'oci_num_rows',
+ 'oci_parse',
+ 'oci_password_change',
+ 'oci_pconnect',
+ 'oci_register_taf_callback',
+ 'oci_result',
+ 'oci_rollback',
+ 'oci_server_version',
+ 'oci_set_action',
+ 'oci_set_call_timeout',
+ 'oci_set_client_identifier',
+ 'oci_set_client_info',
+ 'oci_set_db_operation',
+ 'oci_set_edition',
+ 'oci_set_module_name',
+ 'oci_set_prefetch_lob',
+ 'oci_set_prefetch',
+ 'oci_statement_type',
+ 'oci_unregister_taf_callback'),
+ 'ODBC': ('odbc_autocommit',
+ 'odbc_binmode',
+ 'odbc_close_all',
+ 'odbc_close',
+ 'odbc_columnprivileges',
+ 'odbc_columns',
+ 'odbc_commit',
+ 'odbc_connect',
+ 'odbc_cursor',
+ 'odbc_data_source',
+ 'odbc_do',
+ 'odbc_error',
+ 'odbc_errormsg',
+ 'odbc_exec',
+ 'odbc_execute',
+ 'odbc_fetch_array',
+ 'odbc_fetch_into',
+ 'odbc_fetch_object',
+ 'odbc_fetch_row',
+ 'odbc_field_len',
+ 'odbc_field_name',
+ 'odbc_field_num',
+ 'odbc_field_precision',
+ 'odbc_field_scale',
+ 'odbc_field_type',
+ 'odbc_foreignkeys',
+ 'odbc_free_result',
+ 'odbc_gettypeinfo',
+ 'odbc_longreadlen',
+ 'odbc_next_result',
+ 'odbc_num_fields',
+ 'odbc_num_rows',
+ 'odbc_pconnect',
+ 'odbc_prepare',
+ 'odbc_primarykeys',
+ 'odbc_procedurecolumns',
+ 'odbc_procedures',
+ 'odbc_result_all',
+ 'odbc_result',
+ 'odbc_rollback',
+ 'odbc_setoption',
+ 'odbc_specialcolumns',
+ 'odbc_statistics',
+ 'odbc_tableprivileges',
+ 'odbc_tables'),
+ 'OPcache': ('opcache_compile_file',
+ 'opcache_get_configuration',
+ 'opcache_get_status',
+ 'opcache_invalidate',
+ 'opcache_is_script_cached',
+ 'opcache_reset'),
+ 'OpenAL': ('openal_buffer_create',
+ 'openal_buffer_data',
+ 'openal_buffer_destroy',
+ 'openal_buffer_get',
+ 'openal_buffer_loadwav',
+ 'openal_context_create',
+ 'openal_context_current',
+ 'openal_context_destroy',
+ 'openal_context_process',
+ 'openal_context_suspend',
+ 'openal_device_close',
+ 'openal_device_open',
+ 'openal_listener_get',
+ 'openal_listener_set',
+ 'openal_source_create',
+ 'openal_source_destroy',
+ 'openal_source_get',
+ 'openal_source_pause',
+ 'openal_source_play',
+ 'openal_source_rewind',
+ 'openal_source_set',
+ 'openal_source_stop',
+ 'openal_stream'),
+ 'OpenSSL': ('openssl_cipher_iv_length',
+ 'openssl_cms_decrypt',
+ 'openssl_cms_encrypt',
+ 'openssl_cms_read',
+ 'openssl_cms_sign',
+ 'openssl_cms_verify',
+ 'openssl_csr_export_to_file',
+ 'openssl_csr_export',
+ 'openssl_csr_get_public_key',
+ 'openssl_csr_get_subject',
+ 'openssl_csr_new',
+ 'openssl_csr_sign',
+ 'openssl_decrypt',
+ 'openssl_dh_compute_key',
+ 'openssl_digest',
+ 'openssl_encrypt',
+ 'openssl_error_string',
+ 'openssl_free_key',
+ 'openssl_get_cert_locations',
+ 'openssl_get_cipher_methods',
+ 'openssl_get_curve_names',
+ 'openssl_get_md_methods',
+ 'openssl_get_privatekey',
+ 'openssl_get_publickey',
+ 'openssl_open',
+ 'openssl_pbkdf2',
+ 'openssl_pkcs12_export_to_file',
+ 'openssl_pkcs12_export',
+ 'openssl_pkcs12_read',
+ 'openssl_pkcs7_decrypt',
+ 'openssl_pkcs7_encrypt',
+ 'openssl_pkcs7_read',
+ 'openssl_pkcs7_sign',
+ 'openssl_pkcs7_verify',
+ 'openssl_pkey_derive',
+ 'openssl_pkey_export_to_file',
+ 'openssl_pkey_export',
+ 'openssl_pkey_free',
+ 'openssl_pkey_get_details',
+ 'openssl_pkey_get_private',
+ 'openssl_pkey_get_public',
+ 'openssl_pkey_new',
+ 'openssl_private_decrypt',
+ 'openssl_private_encrypt',
+ 'openssl_public_decrypt',
+ 'openssl_public_encrypt',
+ 'openssl_random_pseudo_bytes',
+ 'openssl_seal',
+ 'openssl_sign',
+ 'openssl_spki_export_challenge',
+ 'openssl_spki_export',
+ 'openssl_spki_new',
+ 'openssl_spki_verify',
+ 'openssl_verify',
+ 'openssl_x509_check_private_key',
+ 'openssl_x509_checkpurpose',
+ 'openssl_x509_export_to_file',
+ 'openssl_x509_export',
+ 'openssl_x509_fingerprint',
+ 'openssl_x509_free',
+ 'openssl_x509_parse',
+ 'openssl_x509_read',
+ 'openssl_x509_verify'),
+ 'Output Control': ('flush',
+ 'ob_clean',
+ 'ob_end_clean',
+ 'ob_end_flush',
+ 'ob_flush',
+ 'ob_get_clean',
+ 'ob_get_contents',
+ 'ob_get_flush',
+ 'ob_get_length',
+ 'ob_get_level',
+ 'ob_get_status',
+ 'ob_gzhandler',
+ 'ob_implicit_flush',
+ 'ob_list_handlers',
+ 'ob_start',
+ 'output_add_rewrite_var',
+ 'output_reset_rewrite_vars'),
+ 'PCNTL': ('pcntl_alarm',
+ 'pcntl_async_signals',
+ 'pcntl_errno',
+ 'pcntl_exec',
+ 'pcntl_fork',
+ 'pcntl_get_last_error',
+ 'pcntl_getpriority',
+ 'pcntl_setpriority',
+ 'pcntl_signal_dispatch',
+ 'pcntl_signal_get_handler',
+ 'pcntl_signal',
+ 'pcntl_sigprocmask',
+ 'pcntl_sigtimedwait',
+ 'pcntl_sigwaitinfo',
+ 'pcntl_strerror',
+ 'pcntl_wait',
+ 'pcntl_waitpid',
+ 'pcntl_wexitstatus',
+ 'pcntl_wifexited',
+ 'pcntl_wifsignaled',
+ 'pcntl_wifstopped',
+ 'pcntl_wstopsig',
+ 'pcntl_wtermsig'),
+ 'PCRE': ('preg_filter',
+ 'preg_grep',
+ 'preg_last_error_msg',
+ 'preg_last_error',
+ 'preg_match_all',
+ 'preg_match',
+ 'preg_quote',
+ 'preg_replace_callback_array',
+ 'preg_replace_callback',
+ 'preg_replace',
+ 'preg_split'),
+ 'PHP Options/Info': ('assert_options',
+ 'assert',
+ 'cli_get_process_title',
+ 'cli_set_process_title',
+ 'dl',
+ 'extension_loaded',
+ 'gc_collect_cycles',
+ 'gc_disable',
+ 'gc_enable',
+ 'gc_enabled',
+ 'gc_mem_caches',
+ 'gc_status',
+ 'get_cfg_var',
+ 'get_current_user',
+ 'get_defined_constants',
+ 'get_extension_funcs',
+ 'get_include_path',
+ 'get_included_files',
+ 'get_loaded_extensions',
+ 'get_magic_quotes_gpc',
+ 'get_magic_quotes_runtime',
+ 'get_required_files',
+ 'get_resources',
+ 'getenv',
+ 'getlastmod',
+ 'getmygid',
+ 'getmyinode',
+ 'getmypid',
+ 'getmyuid',
+ 'getopt',
+ 'getrusage',
+ 'ini_alter',
+ 'ini_get_all',
+ 'ini_get',
+ 'ini_restore',
+ 'ini_set',
+ 'memory_get_peak_usage',
+ 'memory_get_usage',
+ 'php_ini_loaded_file',
+ 'php_ini_scanned_files',
+ 'php_sapi_name',
+ 'php_uname',
+ 'phpcredits',
+ 'phpinfo',
+ 'phpversion',
+ 'putenv',
+ 'restore_include_path',
+ 'set_include_path',
+ 'set_time_limit',
+ 'sys_get_temp_dir',
+ 'version_compare',
+ 'zend_thread_id',
+ 'zend_version'),
+ 'POSIX': ('posix_access',
+ 'posix_ctermid',
+ 'posix_errno',
+ 'posix_get_last_error',
+ 'posix_getcwd',
+ 'posix_getegid',
+ 'posix_geteuid',
+ 'posix_getgid',
+ 'posix_getgrgid',
+ 'posix_getgrnam',
+ 'posix_getgroups',
+ 'posix_getlogin',
+ 'posix_getpgid',
+ 'posix_getpgrp',
+ 'posix_getpid',
+ 'posix_getppid',
+ 'posix_getpwnam',
+ 'posix_getpwuid',
+ 'posix_getrlimit',
+ 'posix_getsid',
+ 'posix_getuid',
+ 'posix_initgroups',
+ 'posix_isatty',
+ 'posix_kill',
+ 'posix_mkfifo',
+ 'posix_mknod',
+ 'posix_setegid',
+ 'posix_seteuid',
+ 'posix_setgid',
+ 'posix_setpgid',
+ 'posix_setrlimit',
+ 'posix_setsid',
+ 'posix_setuid',
+ 'posix_strerror',
+ 'posix_times',
+ 'posix_ttyname',
+ 'posix_uname'),
+ 'PS': ('ps_add_bookmark',
+ 'ps_add_launchlink',
+ 'ps_add_locallink',
+ 'ps_add_note',
+ 'ps_add_pdflink',
+ 'ps_add_weblink',
+ 'ps_arc',
+ 'ps_arcn',
+ 'ps_begin_page',
+ 'ps_begin_pattern',
+ 'ps_begin_template',
+ 'ps_circle',
+ 'ps_clip',
+ 'ps_close_image',
+ 'ps_close',
+ 'ps_closepath_stroke',
+ 'ps_closepath',
+ 'ps_continue_text',
+ 'ps_curveto',
+ 'ps_delete',
+ 'ps_end_page',
+ 'ps_end_pattern',
+ 'ps_end_template',
+ 'ps_fill_stroke',
+ 'ps_fill',
+ 'ps_findfont',
+ 'ps_get_buffer',
+ 'ps_get_parameter',
+ 'ps_get_value',
+ 'ps_hyphenate',
+ 'ps_include_file',
+ 'ps_lineto',
+ 'ps_makespotcolor',
+ 'ps_moveto',
+ 'ps_new',
+ 'ps_open_file',
+ 'ps_open_image_file',
+ 'ps_open_image',
+ 'ps_open_memory_image',
+ 'ps_place_image',
+ 'ps_rect',
+ 'ps_restore',
+ 'ps_rotate',
+ 'ps_save',
+ 'ps_scale',
+ 'ps_set_border_color',
+ 'ps_set_border_dash',
+ 'ps_set_border_style',
+ 'ps_set_info',
+ 'ps_set_parameter',
+ 'ps_set_text_pos',
+ 'ps_set_value',
+ 'ps_setcolor',
+ 'ps_setdash',
+ 'ps_setflat',
+ 'ps_setfont',
+ 'ps_setgray',
+ 'ps_setlinecap',
+ 'ps_setlinejoin',
+ 'ps_setlinewidth',
+ 'ps_setmiterlimit',
+ 'ps_setoverprintmode',
+ 'ps_setpolydash',
+ 'ps_shading_pattern',
+ 'ps_shading',
+ 'ps_shfill',
+ 'ps_show_boxed',
+ 'ps_show_xy2',
+ 'ps_show_xy',
+ 'ps_show2',
+ 'ps_show',
+ 'ps_string_geometry',
+ 'ps_stringwidth',
+ 'ps_stroke',
+ 'ps_symbol_name',
+ 'ps_symbol_width',
+ 'ps_symbol',
+ 'ps_translate'),
+ 'Password Hashing': ('password_algos',
+ 'password_get_info',
+ 'password_hash',
+ 'password_needs_rehash',
+ 'password_verify'),
+ 'PostgreSQL': ('pg_affected_rows',
+ 'pg_cancel_query',
+ 'pg_client_encoding',
+ 'pg_close',
+ 'pg_connect_poll',
+ 'pg_connect',
+ 'pg_connection_busy',
+ 'pg_connection_reset',
+ 'pg_connection_status',
+ 'pg_consume_input',
+ 'pg_convert',
+ 'pg_copy_from',
+ 'pg_copy_to',
+ 'pg_dbname',
+ 'pg_delete',
+ 'pg_end_copy',
+ 'pg_escape_bytea',
+ 'pg_escape_identifier',
+ 'pg_escape_literal',
+ 'pg_escape_string',
+ 'pg_execute',
+ 'pg_fetch_all_columns',
+ 'pg_fetch_all',
+ 'pg_fetch_array',
+ 'pg_fetch_assoc',
+ 'pg_fetch_object',
+ 'pg_fetch_result',
+ 'pg_fetch_row',
+ 'pg_field_is_null',
+ 'pg_field_name',
+ 'pg_field_num',
+ 'pg_field_prtlen',
+ 'pg_field_size',
+ 'pg_field_table',
+ 'pg_field_type_oid',
+ 'pg_field_type',
+ 'pg_flush',
+ 'pg_free_result',
+ 'pg_get_notify',
+ 'pg_get_pid',
+ 'pg_get_result',
+ 'pg_host',
+ 'pg_insert',
+ 'pg_last_error',
+ 'pg_last_notice',
+ 'pg_last_oid',
+ 'pg_lo_close',
+ 'pg_lo_create',
+ 'pg_lo_export',
+ 'pg_lo_import',
+ 'pg_lo_open',
+ 'pg_lo_read_all',
+ 'pg_lo_read',
+ 'pg_lo_seek',
+ 'pg_lo_tell',
+ 'pg_lo_truncate',
+ 'pg_lo_unlink',
+ 'pg_lo_write',
+ 'pg_meta_data',
+ 'pg_num_fields',
+ 'pg_num_rows',
+ 'pg_options',
+ 'pg_parameter_status',
+ 'pg_pconnect',
+ 'pg_ping',
+ 'pg_port',
+ 'pg_prepare',
+ 'pg_put_line',
+ 'pg_query_params',
+ 'pg_query',
+ 'pg_result_error_field',
+ 'pg_result_error',
+ 'pg_result_seek',
+ 'pg_result_status',
+ 'pg_select',
+ 'pg_send_execute',
+ 'pg_send_prepare',
+ 'pg_send_query_params',
+ 'pg_send_query',
+ 'pg_set_client_encoding',
+ 'pg_set_error_verbosity',
+ 'pg_socket',
+ 'pg_trace',
+ 'pg_transaction_status',
+ 'pg_tty',
+ 'pg_unescape_bytea',
+ 'pg_untrace',
+ 'pg_update',
+ 'pg_version'),
+ 'Program execution': ('escapeshellarg',
+ 'escapeshellcmd',
+ 'exec',
+ 'passthru',
+ 'proc_close',
+ 'proc_get_status',
+ 'proc_nice',
+ 'proc_open',
+ 'proc_terminate',
+ 'shell_exec',
+ 'system'),
+ 'Pspell': ('pspell_add_to_personal',
+ 'pspell_add_to_session',
+ 'pspell_check',
+ 'pspell_clear_session',
+ 'pspell_config_create',
+ 'pspell_config_data_dir',
+ 'pspell_config_dict_dir',
+ 'pspell_config_ignore',
+ 'pspell_config_mode',
+ 'pspell_config_personal',
+ 'pspell_config_repl',
+ 'pspell_config_runtogether',
+ 'pspell_config_save_repl',
+ 'pspell_new_config',
+ 'pspell_new_personal',
+ 'pspell_new',
+ 'pspell_save_wordlist',
+ 'pspell_store_replacement',
+ 'pspell_suggest'),
+ 'RRD': ('rrd_create',
+ 'rrd_error',
+ 'rrd_fetch',
+ 'rrd_first',
+ 'rrd_graph',
+ 'rrd_info',
+ 'rrd_last',
+ 'rrd_lastupdate',
+ 'rrd_restore',
+ 'rrd_tune',
+ 'rrd_update',
+ 'rrd_version',
+ 'rrd_xport',
+ 'rrdc_disconnect'),
+ 'Radius': ('radius_acct_open',
+ 'radius_add_server',
+ 'radius_auth_open',
+ 'radius_close',
+ 'radius_config',
+ 'radius_create_request',
+ 'radius_cvt_addr',
+ 'radius_cvt_int',
+ 'radius_cvt_string',
+ 'radius_demangle_mppe_key',
+ 'radius_demangle',
+ 'radius_get_attr',
+ 'radius_get_tagged_attr_data',
+ 'radius_get_tagged_attr_tag',
+ 'radius_get_vendor_attr',
+ 'radius_put_addr',
+ 'radius_put_attr',
+ 'radius_put_int',
+ 'radius_put_string',
+ 'radius_put_vendor_addr',
+ 'radius_put_vendor_attr',
+ 'radius_put_vendor_int',
+ 'radius_put_vendor_string',
+ 'radius_request_authenticator',
+ 'radius_salt_encrypt_attr',
+ 'radius_send_request',
+ 'radius_server_secret',
+ 'radius_strerror'),
+ 'Rar': ('rar_wrapper_cache_stats',),
+ 'Readline': ('readline_add_history',
+ 'readline_callback_handler_install',
+ 'readline_callback_handler_remove',
+ 'readline_callback_read_char',
+ 'readline_clear_history',
+ 'readline_completion_function',
+ 'readline_info',
+ 'readline_list_history',
+ 'readline_on_new_line',
+ 'readline_read_history',
+ 'readline_redisplay',
+ 'readline_write_history',
+ 'readline'),
+ 'Recode': ('recode_file', 'recode_string', 'recode'),
+ 'RpmInfo': ('rpmaddtag', 'rpmdbinfo', 'rpmdbsearch', 'rpminfo', 'rpmvercmp'),
+ 'SNMP': ('snmp_get_quick_print',
+ 'snmp_get_valueretrieval',
+ 'snmp_read_mib',
+ 'snmp_set_enum_print',
+ 'snmp_set_oid_numeric_print',
+ 'snmp_set_oid_output_format',
+ 'snmp_set_quick_print',
+ 'snmp_set_valueretrieval',
+ 'snmp2_get',
+ 'snmp2_getnext',
+ 'snmp2_real_walk',
+ 'snmp2_set',
+ 'snmp2_walk',
+ 'snmp3_get',
+ 'snmp3_getnext',
+ 'snmp3_real_walk',
+ 'snmp3_set',
+ 'snmp3_walk',
+ 'snmpget',
+ 'snmpgetnext',
+ 'snmprealwalk',
+ 'snmpset',
+ 'snmpwalk',
+ 'snmpwalkoid'),
+ 'SOAP': ('is_soap_fault', 'use_soap_error_handler'),
+ 'SPL': ('class_implements',
+ 'class_parents',
+ 'class_uses',
+ 'iterator_apply',
+ 'iterator_count',
+ 'iterator_to_array',
+ 'spl_autoload_call',
+ 'spl_autoload_extensions',
+ 'spl_autoload_functions',
+ 'spl_autoload_register',
+ 'spl_autoload_unregister',
+ 'spl_autoload',
+ 'spl_classes',
+ 'spl_object_hash',
+ 'spl_object_id'),
+ 'SQLSRV': ('sqlsrv_begin_transaction',
+ 'sqlsrv_cancel',
+ 'sqlsrv_client_info',
+ 'sqlsrv_close',
+ 'sqlsrv_commit',
+ 'sqlsrv_configure',
+ 'sqlsrv_connect',
+ 'sqlsrv_errors',
+ 'sqlsrv_execute',
+ 'sqlsrv_fetch_array',
+ 'sqlsrv_fetch_object',
+ 'sqlsrv_fetch',
+ 'sqlsrv_field_metadata',
+ 'sqlsrv_free_stmt',
+ 'sqlsrv_get_config',
+ 'sqlsrv_get_field',
+ 'sqlsrv_has_rows',
+ 'sqlsrv_next_result',
+ 'sqlsrv_num_fields',
+ 'sqlsrv_num_rows',
+ 'sqlsrv_prepare',
+ 'sqlsrv_query',
+ 'sqlsrv_rollback',
+ 'sqlsrv_rows_affected',
+ 'sqlsrv_send_stream_data',
+ 'sqlsrv_server_info'),
+ 'SSH2': ('ssh2_auth_agent',
+ 'ssh2_auth_hostbased_file',
+ 'ssh2_auth_none',
+ 'ssh2_auth_password',
+ 'ssh2_auth_pubkey_file',
+ 'ssh2_connect',
+ 'ssh2_disconnect',
+ 'ssh2_exec',
+ 'ssh2_fetch_stream',
+ 'ssh2_fingerprint',
+ 'ssh2_forward_accept',
+ 'ssh2_forward_listen',
+ 'ssh2_methods_negotiated',
+ 'ssh2_poll',
+ 'ssh2_publickey_add',
+ 'ssh2_publickey_init',
+ 'ssh2_publickey_list',
+ 'ssh2_publickey_remove',
+ 'ssh2_scp_recv',
+ 'ssh2_scp_send',
+ 'ssh2_send_eof',
+ 'ssh2_sftp_chmod',
+ 'ssh2_sftp_lstat',
+ 'ssh2_sftp_mkdir',
+ 'ssh2_sftp_readlink',
+ 'ssh2_sftp_realpath',
+ 'ssh2_sftp_rename',
+ 'ssh2_sftp_rmdir',
+ 'ssh2_sftp_stat',
+ 'ssh2_sftp_symlink',
+ 'ssh2_sftp_unlink',
+ 'ssh2_sftp',
+ 'ssh2_shell',
+ 'ssh2_tunnel'),
+ 'SVN': ('svn_add',
+ 'svn_auth_get_parameter',
+ 'svn_auth_set_parameter',
+ 'svn_blame',
+ 'svn_cat',
+ 'svn_checkout',
+ 'svn_cleanup',
+ 'svn_client_version',
+ 'svn_commit',
+ 'svn_delete',
+ 'svn_diff',
+ 'svn_export',
+ 'svn_fs_abort_txn',
+ 'svn_fs_apply_text',
+ 'svn_fs_begin_txn2',
+ 'svn_fs_change_node_prop',
+ 'svn_fs_check_path',
+ 'svn_fs_contents_changed',
+ 'svn_fs_copy',
+ 'svn_fs_delete',
+ 'svn_fs_dir_entries',
+ 'svn_fs_file_contents',
+ 'svn_fs_file_length',
+ 'svn_fs_is_dir',
+ 'svn_fs_is_file',
+ 'svn_fs_make_dir',
+ 'svn_fs_make_file',
+ 'svn_fs_node_created_rev',
+ 'svn_fs_node_prop',
+ 'svn_fs_props_changed',
+ 'svn_fs_revision_prop',
+ 'svn_fs_revision_root',
+ 'svn_fs_txn_root',
+ 'svn_fs_youngest_rev',
+ 'svn_import',
+ 'svn_log',
+ 'svn_ls',
+ 'svn_mkdir',
+ 'svn_repos_create',
+ 'svn_repos_fs_begin_txn_for_commit',
+ 'svn_repos_fs_commit_txn',
+ 'svn_repos_fs',
+ 'svn_repos_hotcopy',
+ 'svn_repos_open',
+ 'svn_repos_recover',
+ 'svn_revert',
+ 'svn_status',
+ 'svn_update'),
+ 'Scoutapm': ('scoutapm_get_calls', 'scoutapm_list_instrumented_functions'),
+ 'Seaslog': ('seaslog_get_author', 'seaslog_get_version'),
+ 'Semaphore': ('ftok',
+ 'msg_get_queue',
+ 'msg_queue_exists',
+ 'msg_receive',
+ 'msg_remove_queue',
+ 'msg_send',
+ 'msg_set_queue',
+ 'msg_stat_queue',
+ 'sem_acquire',
+ 'sem_get',
+ 'sem_release',
+ 'sem_remove',
+ 'shm_attach',
+ 'shm_detach',
+ 'shm_get_var',
+ 'shm_has_var',
+ 'shm_put_var',
+ 'shm_remove_var',
+ 'shm_remove'),
+ 'Session': ('session_abort',
+ 'session_cache_expire',
+ 'session_cache_limiter',
+ 'session_commit',
+ 'session_create_id',
+ 'session_decode',
+ 'session_destroy',
+ 'session_encode',
+ 'session_gc',
+ 'session_get_cookie_params',
+ 'session_id',
+ 'session_module_name',
+ 'session_name',
+ 'session_regenerate_id',
+ 'session_register_shutdown',
+ 'session_reset',
+ 'session_save_path',
+ 'session_set_cookie_params',
+ 'session_set_save_handler',
+ 'session_start',
+ 'session_status',
+ 'session_unset',
+ 'session_write_close'),
+ 'Shared Memory': ('shmop_close',
+ 'shmop_delete',
+ 'shmop_open',
+ 'shmop_read',
+ 'shmop_size',
+ 'shmop_write'),
+ 'SimpleXML': ('simplexml_import_dom',
+ 'simplexml_load_file',
+ 'simplexml_load_string'),
+ 'Socket': ('socket_accept',
+ 'socket_addrinfo_bind',
+ 'socket_addrinfo_connect',
+ 'socket_addrinfo_explain',
+ 'socket_addrinfo_lookup',
+ 'socket_bind',
+ 'socket_clear_error',
+ 'socket_close',
+ 'socket_cmsg_space',
+ 'socket_connect',
+ 'socket_create_listen',
+ 'socket_create_pair',
+ 'socket_create',
+ 'socket_export_stream',
+ 'socket_get_option',
+ 'socket_getopt',
+ 'socket_getpeername',
+ 'socket_getsockname',
+ 'socket_import_stream',
+ 'socket_last_error',
+ 'socket_listen',
+ 'socket_read',
+ 'socket_recv',
+ 'socket_recvfrom',
+ 'socket_recvmsg',
+ 'socket_select',
+ 'socket_send',
+ 'socket_sendmsg',
+ 'socket_sendto',
+ 'socket_set_block',
+ 'socket_set_nonblock',
+ 'socket_set_option',
+ 'socket_setopt',
+ 'socket_shutdown',
+ 'socket_strerror',
+ 'socket_write',
+ 'socket_wsaprotocol_info_export',
+ 'socket_wsaprotocol_info_import',
+ 'socket_wsaprotocol_info_release'),
+ 'Sodium': ('sodium_add',
+ 'sodium_base642bin',
+ 'sodium_bin2base64',
+ 'sodium_bin2hex',
+ 'sodium_compare',
+ 'sodium_crypto_aead_aes256gcm_decrypt',
+ 'sodium_crypto_aead_aes256gcm_encrypt',
+ 'sodium_crypto_aead_aes256gcm_is_available',
+ 'sodium_crypto_aead_aes256gcm_keygen',
+ 'sodium_crypto_aead_chacha20poly1305_decrypt',
+ 'sodium_crypto_aead_chacha20poly1305_encrypt',
+ 'sodium_crypto_aead_chacha20poly1305_ietf_decrypt',
+ 'sodium_crypto_aead_chacha20poly1305_ietf_encrypt',
+ 'sodium_crypto_aead_chacha20poly1305_ietf_keygen',
+ 'sodium_crypto_aead_chacha20poly1305_keygen',
+ 'sodium_crypto_aead_xchacha20poly1305_ietf_decrypt',
+ 'sodium_crypto_aead_xchacha20poly1305_ietf_encrypt',
+ 'sodium_crypto_aead_xchacha20poly1305_ietf_keygen',
+ 'sodium_crypto_auth_keygen',
+ 'sodium_crypto_auth_verify',
+ 'sodium_crypto_auth',
+ 'sodium_crypto_box_keypair_from_secretkey_and_publickey',
+ 'sodium_crypto_box_keypair',
+ 'sodium_crypto_box_open',
+ 'sodium_crypto_box_publickey_from_secretkey',
+ 'sodium_crypto_box_publickey',
+ 'sodium_crypto_box_seal_open',
+ 'sodium_crypto_box_seal',
+ 'sodium_crypto_box_secretkey',
+ 'sodium_crypto_box_seed_keypair',
+ 'sodium_crypto_box',
+ 'sodium_crypto_generichash_final',
+ 'sodium_crypto_generichash_init',
+ 'sodium_crypto_generichash_keygen',
+ 'sodium_crypto_generichash_update',
+ 'sodium_crypto_generichash',
+ 'sodium_crypto_kdf_derive_from_key',
+ 'sodium_crypto_kdf_keygen',
+ 'sodium_crypto_kx_client_session_keys',
+ 'sodium_crypto_kx_keypair',
+ 'sodium_crypto_kx_publickey',
+ 'sodium_crypto_kx_secretkey',
+ 'sodium_crypto_kx_seed_keypair',
+ 'sodium_crypto_kx_server_session_keys',
+ 'sodium_crypto_pwhash_scryptsalsa208sha256_str_verify',
+ 'sodium_crypto_pwhash_scryptsalsa208sha256_str',
+ 'sodium_crypto_pwhash_scryptsalsa208sha256',
+ 'sodium_crypto_pwhash_str_needs_rehash',
+ 'sodium_crypto_pwhash_str_verify',
+ 'sodium_crypto_pwhash_str',
+ 'sodium_crypto_pwhash',
+ 'sodium_crypto_scalarmult_base',
+ 'sodium_crypto_scalarmult',
+ 'sodium_crypto_secretbox_keygen',
+ 'sodium_crypto_secretbox_open',
+ 'sodium_crypto_secretbox',
+ 'sodium_crypto_secretstream_xchacha20poly1305_init_pull',
+ 'sodium_crypto_secretstream_xchacha20poly1305_init_push',
+ 'sodium_crypto_secretstream_xchacha20poly1305_keygen',
+ 'sodium_crypto_secretstream_xchacha20poly1305_pull',
+ 'sodium_crypto_secretstream_xchacha20poly1305_push',
+ 'sodium_crypto_secretstream_xchacha20poly1305_rekey',
+ 'sodium_crypto_shorthash_keygen',
+ 'sodium_crypto_shorthash',
+ 'sodium_crypto_sign_detached',
+ 'sodium_crypto_sign_ed25519_pk_to_curve25519',
+ 'sodium_crypto_sign_ed25519_sk_to_curve25519',
+ 'sodium_crypto_sign_keypair_from_secretkey_and_publickey',
+ 'sodium_crypto_sign_keypair',
+ 'sodium_crypto_sign_open',
+ 'sodium_crypto_sign_publickey_from_secretkey',
+ 'sodium_crypto_sign_publickey',
+ 'sodium_crypto_sign_secretkey',
+ 'sodium_crypto_sign_seed_keypair',
+ 'sodium_crypto_sign_verify_detached',
+ 'sodium_crypto_sign',
+ 'sodium_crypto_stream_keygen',
+ 'sodium_crypto_stream_xor',
+ 'sodium_crypto_stream',
+ 'sodium_hex2bin',
+ 'sodium_increment',
+ 'sodium_memcmp',
+ 'sodium_memzero',
+ 'sodium_pad',
+ 'sodium_unpad'),
+ 'Solr': ('solr_get_version',),
+ 'Stomp': ('stomp_connect_error', 'stomp_version'),
+ 'Stream': ('stream_bucket_append',
+ 'stream_bucket_make_writeable',
+ 'stream_bucket_new',
+ 'stream_bucket_prepend',
+ 'stream_context_create',
+ 'stream_context_get_default',
+ 'stream_context_get_options',
+ 'stream_context_get_params',
+ 'stream_context_set_default',
+ 'stream_context_set_option',
+ 'stream_context_set_params',
+ 'stream_copy_to_stream',
+ 'stream_filter_append',
+ 'stream_filter_prepend',
+ 'stream_filter_register',
+ 'stream_filter_remove',
+ 'stream_get_contents',
+ 'stream_get_filters',
+ 'stream_get_line',
+ 'stream_get_meta_data',
+ 'stream_get_transports',
+ 'stream_get_wrappers',
+ 'stream_is_local',
+ 'stream_isatty',
+ 'stream_notification_callback',
+ 'stream_register_wrapper',
+ 'stream_resolve_include_path',
+ 'stream_select',
+ 'stream_set_blocking',
+ 'stream_set_chunk_size',
+ 'stream_set_read_buffer',
+ 'stream_set_timeout',
+ 'stream_set_write_buffer',
+ 'stream_socket_accept',
+ 'stream_socket_client',
+ 'stream_socket_enable_crypto',
+ 'stream_socket_get_name',
+ 'stream_socket_pair',
+ 'stream_socket_recvfrom',
+ 'stream_socket_sendto',
+ 'stream_socket_server',
+ 'stream_socket_shutdown',
+ 'stream_supports_lock',
+ 'stream_wrapper_register',
+ 'stream_wrapper_restore',
+ 'stream_wrapper_unregister'),
+ 'String': ('addcslashes',
+ 'addslashes',
+ 'bin2hex',
+ 'chop',
+ 'chr',
+ 'chunk_split',
+ 'convert_cyr_string',
+ 'convert_uudecode',
+ 'convert_uuencode',
+ 'count_chars',
+ 'crc32',
+ 'crypt',
+ 'echo',
+ 'explode',
+ 'fprintf',
+ 'get_html_translation_table',
+ 'hebrev',
+ 'hebrevc',
+ 'hex2bin',
+ 'html_entity_decode',
+ 'htmlentities',
+ 'htmlspecialchars_decode',
+ 'htmlspecialchars',
+ 'implode',
+ 'join',
+ 'lcfirst',
+ 'levenshtein',
+ 'localeconv',
+ 'ltrim',
+ 'md5_file',
+ 'md5',
+ 'metaphone',
+ 'money_format',
+ 'nl_langinfo',
+ 'nl2br',
+ 'number_format',
+ 'ord',
+ 'parse_str',
+ 'print',
+ 'printf',
+ 'quoted_printable_decode',
+ 'quoted_printable_encode',
+ 'quotemeta',
+ 'rtrim',
+ 'setlocale',
+ 'sha1_file',
+ 'sha1',
+ 'similar_text',
+ 'soundex',
+ 'sprintf',
+ 'sscanf',
+ 'str_contains',
+ 'str_ends_with',
+ 'str_getcsv',
+ 'str_ireplace',
+ 'str_pad',
+ 'str_repeat',
+ 'str_replace',
+ 'str_rot13',
+ 'str_shuffle',
+ 'str_split',
+ 'str_starts_with',
+ 'str_word_count',
+ 'strcasecmp',
+ 'strchr',
+ 'strcmp',
+ 'strcoll',
+ 'strcspn',
+ 'strip_tags',
+ 'stripcslashes',
+ 'stripos',
+ 'stripslashes',
+ 'stristr',
+ 'strlen',
+ 'strnatcasecmp',
+ 'strnatcmp',
+ 'strncasecmp',
+ 'strncmp',
+ 'strpbrk',
+ 'strpos',
+ 'strrchr',
+ 'strrev',
+ 'strripos',
+ 'strrpos',
+ 'strspn',
+ 'strstr',
+ 'strtok',
+ 'strtolower',
+ 'strtoupper',
+ 'strtr',
+ 'substr_compare',
+ 'substr_count',
+ 'substr_replace',
+ 'substr',
+ 'trim',
+ 'ucfirst',
+ 'ucwords',
+ 'vfprintf',
+ 'vprintf',
+ 'vsprintf',
+ 'wordwrap'),
+ 'Swoole': ('swoole_async_dns_lookup',
+ 'swoole_async_read',
+ 'swoole_async_readfile',
+ 'swoole_async_set',
+ 'swoole_async_write',
+ 'swoole_async_writefile',
+ 'swoole_clear_error',
+ 'swoole_client_select',
+ 'swoole_cpu_num',
+ 'swoole_errno',
+ 'swoole_error_log',
+ 'swoole_event_add',
+ 'swoole_event_defer',
+ 'swoole_event_del',
+ 'swoole_event_exit',
+ 'swoole_event_set',
+ 'swoole_event_wait',
+ 'swoole_event_write',
+ 'swoole_get_local_ip',
+ 'swoole_last_error',
+ 'swoole_load_module',
+ 'swoole_select',
+ 'swoole_set_process_name',
+ 'swoole_strerror',
+ 'swoole_timer_after',
+ 'swoole_timer_exists',
+ 'swoole_timer_tick',
+ 'swoole_version'),
+ 'TCP': ('tcpwrap_check',),
+ 'Taint': ('is_tainted', 'taint', 'untaint'),
+ 'Tidy': ('ob_tidyhandler',
+ 'tidy_access_count',
+ 'tidy_config_count',
+ 'tidy_error_count',
+ 'tidy_get_output',
+ 'tidy_warning_count'),
+ 'Tokenizer': ('token_get_all', 'token_name'),
+ 'Trader': ('trader_acos',
+ 'trader_ad',
+ 'trader_add',
+ 'trader_adosc',
+ 'trader_adx',
+ 'trader_adxr',
+ 'trader_apo',
+ 'trader_aroon',
+ 'trader_aroonosc',
+ 'trader_asin',
+ 'trader_atan',
+ 'trader_atr',
+ 'trader_avgprice',
+ 'trader_bbands',
+ 'trader_beta',
+ 'trader_bop',
+ 'trader_cci',
+ 'trader_cdl2crows',
+ 'trader_cdl3blackcrows',
+ 'trader_cdl3inside',
+ 'trader_cdl3linestrike',
+ 'trader_cdl3outside',
+ 'trader_cdl3starsinsouth',
+ 'trader_cdl3whitesoldiers',
+ 'trader_cdlabandonedbaby',
+ 'trader_cdladvanceblock',
+ 'trader_cdlbelthold',
+ 'trader_cdlbreakaway',
+ 'trader_cdlclosingmarubozu',
+ 'trader_cdlconcealbabyswall',
+ 'trader_cdlcounterattack',
+ 'trader_cdldarkcloudcover',
+ 'trader_cdldoji',
+ 'trader_cdldojistar',
+ 'trader_cdldragonflydoji',
+ 'trader_cdlengulfing',
+ 'trader_cdleveningdojistar',
+ 'trader_cdleveningstar',
+ 'trader_cdlgapsidesidewhite',
+ 'trader_cdlgravestonedoji',
+ 'trader_cdlhammer',
+ 'trader_cdlhangingman',
+ 'trader_cdlharami',
+ 'trader_cdlharamicross',
+ 'trader_cdlhighwave',
+ 'trader_cdlhikkake',
+ 'trader_cdlhikkakemod',
+ 'trader_cdlhomingpigeon',
+ 'trader_cdlidentical3crows',
+ 'trader_cdlinneck',
+ 'trader_cdlinvertedhammer',
+ 'trader_cdlkicking',
+ 'trader_cdlkickingbylength',
+ 'trader_cdlladderbottom',
+ 'trader_cdllongleggeddoji',
+ 'trader_cdllongline',
+ 'trader_cdlmarubozu',
+ 'trader_cdlmatchinglow',
+ 'trader_cdlmathold',
+ 'trader_cdlmorningdojistar',
+ 'trader_cdlmorningstar',
+ 'trader_cdlonneck',
+ 'trader_cdlpiercing',
+ 'trader_cdlrickshawman',
+ 'trader_cdlrisefall3methods',
+ 'trader_cdlseparatinglines',
+ 'trader_cdlshootingstar',
+ 'trader_cdlshortline',
+ 'trader_cdlspinningtop',
+ 'trader_cdlstalledpattern',
+ 'trader_cdlsticksandwich',
+ 'trader_cdltakuri',
+ 'trader_cdltasukigap',
+ 'trader_cdlthrusting',
+ 'trader_cdltristar',
+ 'trader_cdlunique3river',
+ 'trader_cdlupsidegap2crows',
+ 'trader_cdlxsidegap3methods',
+ 'trader_ceil',
+ 'trader_cmo',
+ 'trader_correl',
+ 'trader_cos',
+ 'trader_cosh',
+ 'trader_dema',
+ 'trader_div',
+ 'trader_dx',
+ 'trader_ema',
+ 'trader_errno',
+ 'trader_exp',
+ 'trader_floor',
+ 'trader_get_compat',
+ 'trader_get_unstable_period',
+ 'trader_ht_dcperiod',
+ 'trader_ht_dcphase',
+ 'trader_ht_phasor',
+ 'trader_ht_sine',
+ 'trader_ht_trendline',
+ 'trader_ht_trendmode',
+ 'trader_kama',
+ 'trader_linearreg_angle',
+ 'trader_linearreg_intercept',
+ 'trader_linearreg_slope',
+ 'trader_linearreg',
+ 'trader_ln',
+ 'trader_log10',
+ 'trader_ma',
+ 'trader_macd',
+ 'trader_macdext',
+ 'trader_macdfix',
+ 'trader_mama',
+ 'trader_mavp',
+ 'trader_max',
+ 'trader_maxindex',
+ 'trader_medprice',
+ 'trader_mfi',
+ 'trader_midpoint',
+ 'trader_midprice',
+ 'trader_min',
+ 'trader_minindex',
+ 'trader_minmax',
+ 'trader_minmaxindex',
+ 'trader_minus_di',
+ 'trader_minus_dm',
+ 'trader_mom',
+ 'trader_mult',
+ 'trader_natr',
+ 'trader_obv',
+ 'trader_plus_di',
+ 'trader_plus_dm',
+ 'trader_ppo',
+ 'trader_roc',
+ 'trader_rocp',
+ 'trader_rocr100',
+ 'trader_rocr',
+ 'trader_rsi',
+ 'trader_sar',
+ 'trader_sarext',
+ 'trader_set_compat',
+ 'trader_set_unstable_period',
+ 'trader_sin',
+ 'trader_sinh',
+ 'trader_sma',
+ 'trader_sqrt',
+ 'trader_stddev',
+ 'trader_stoch',
+ 'trader_stochf',
+ 'trader_stochrsi',
+ 'trader_sub',
+ 'trader_sum',
+ 'trader_t3',
+ 'trader_tan',
+ 'trader_tanh',
+ 'trader_tema',
+ 'trader_trange',
+ 'trader_trima',
+ 'trader_trix',
+ 'trader_tsf',
+ 'trader_typprice',
+ 'trader_ultosc',
+ 'trader_var',
+ 'trader_wclprice',
+ 'trader_willr',
+ 'trader_wma'),
+ 'URL': ('base64_decode',
+ 'base64_encode',
+ 'get_headers',
+ 'get_meta_tags',
+ 'http_build_query',
+ 'parse_url',
+ 'rawurldecode',
+ 'rawurlencode',
+ 'urldecode',
+ 'urlencode'),
+ 'Uopz': ('uopz_add_function',
+ 'uopz_allow_exit',
+ 'uopz_backup',
+ 'uopz_compose',
+ 'uopz_copy',
+ 'uopz_del_function',
+ 'uopz_delete',
+ 'uopz_extend',
+ 'uopz_flags',
+ 'uopz_function',
+ 'uopz_get_exit_status',
+ 'uopz_get_hook',
+ 'uopz_get_mock',
+ 'uopz_get_property',
+ 'uopz_get_return',
+ 'uopz_get_static',
+ 'uopz_implement',
+ 'uopz_overload',
+ 'uopz_redefine',
+ 'uopz_rename',
+ 'uopz_restore',
+ 'uopz_set_hook',
+ 'uopz_set_mock',
+ 'uopz_set_property',
+ 'uopz_set_return',
+ 'uopz_set_static',
+ 'uopz_undefine',
+ 'uopz_unset_hook',
+ 'uopz_unset_mock',
+ 'uopz_unset_return'),
+ 'Variable handling': ('boolval',
+ 'debug_zval_dump',
+ 'doubleval',
+ 'empty',
+ 'floatval',
+ 'get_debug_type',
+ 'get_defined_vars',
+ 'get_resource_id',
+ 'get_resource_type',
+ 'gettype',
+ 'intval',
+ 'is_array',
+ 'is_bool',
+ 'is_callable',
+ 'is_countable',
+ 'is_double',
+ 'is_float',
+ 'is_int',
+ 'is_integer',
+ 'is_iterable',
+ 'is_long',
+ 'is_null',
+ 'is_numeric',
+ 'is_object',
+ 'is_real',
+ 'is_resource',
+ 'is_scalar',
+ 'is_string',
+ 'isset',
+ 'print_r',
+ 'serialize',
+ 'settype',
+ 'strval',
+ 'unserialize',
+ 'unset',
+ 'var_dump',
+ 'var_export'),
+ 'WDDX': ('wddx_add_vars',
+ 'wddx_deserialize',
+ 'wddx_packet_end',
+ 'wddx_packet_start',
+ 'wddx_serialize_value',
+ 'wddx_serialize_vars'),
+ 'WinCache': ('wincache_fcache_fileinfo',
+ 'wincache_fcache_meminfo',
+ 'wincache_lock',
+ 'wincache_ocache_fileinfo',
+ 'wincache_ocache_meminfo',
+ 'wincache_refresh_if_changed',
+ 'wincache_rplist_fileinfo',
+ 'wincache_rplist_meminfo',
+ 'wincache_scache_info',
+ 'wincache_scache_meminfo',
+ 'wincache_ucache_add',
+ 'wincache_ucache_cas',
+ 'wincache_ucache_clear',
+ 'wincache_ucache_dec',
+ 'wincache_ucache_delete',
+ 'wincache_ucache_exists',
+ 'wincache_ucache_get',
+ 'wincache_ucache_inc',
+ 'wincache_ucache_info',
+ 'wincache_ucache_meminfo',
+ 'wincache_ucache_set',
+ 'wincache_unlock'),
+ 'XML Parser': ('utf8_decode',
+ 'utf8_encode',
+ 'xml_error_string',
+ 'xml_get_current_byte_index',
+ 'xml_get_current_column_number',
+ 'xml_get_current_line_number',
+ 'xml_get_error_code',
+ 'xml_parse_into_struct',
+ 'xml_parse',
+ 'xml_parser_create_ns',
+ 'xml_parser_create',
+ 'xml_parser_free',
+ 'xml_parser_get_option',
+ 'xml_parser_set_option',
+ 'xml_set_character_data_handler',
+ 'xml_set_default_handler',
+ 'xml_set_element_handler',
+ 'xml_set_end_namespace_decl_handler',
+ 'xml_set_external_entity_ref_handler',
+ 'xml_set_notation_decl_handler',
+ 'xml_set_object',
+ 'xml_set_processing_instruction_handler',
+ 'xml_set_start_namespace_decl_handler',
+ 'xml_set_unparsed_entity_decl_handler'),
+ 'XML-RPC': ('xmlrpc_decode_request',
+ 'xmlrpc_decode',
+ 'xmlrpc_encode_request',
+ 'xmlrpc_encode',
+ 'xmlrpc_get_type',
+ 'xmlrpc_is_fault',
+ 'xmlrpc_parse_method_descriptions',
+ 'xmlrpc_server_add_introspection_data',
+ 'xmlrpc_server_call_method',
+ 'xmlrpc_server_create',
+ 'xmlrpc_server_destroy',
+ 'xmlrpc_server_register_introspection_callback',
+ 'xmlrpc_server_register_method',
+ 'xmlrpc_set_type'),
+ 'Xhprof': ('xhprof_disable',
+ 'xhprof_enable',
+ 'xhprof_sample_disable',
+ 'xhprof_sample_enable'),
+ 'YAZ': ('yaz_addinfo',
+ 'yaz_ccl_conf',
+ 'yaz_ccl_parse',
+ 'yaz_close',
+ 'yaz_connect',
+ 'yaz_database',
+ 'yaz_element',
+ 'yaz_errno',
+ 'yaz_error',
+ 'yaz_es_result',
+ 'yaz_es',
+ 'yaz_get_option',
+ 'yaz_hits',
+ 'yaz_itemorder',
+ 'yaz_present',
+ 'yaz_range',
+ 'yaz_record',
+ 'yaz_scan_result',
+ 'yaz_scan',
+ 'yaz_schema',
+ 'yaz_search',
+ 'yaz_set_option',
+ 'yaz_sort',
+ 'yaz_syntax',
+ 'yaz_wait'),
+ 'Yaml': ('yaml_emit_file',
+ 'yaml_emit',
+ 'yaml_parse_file',
+ 'yaml_parse_url',
+ 'yaml_parse'),
+ 'Zip': ('zip_close',
+ 'zip_entry_close',
+ 'zip_entry_compressedsize',
+ 'zip_entry_compressionmethod',
+ 'zip_entry_filesize',
+ 'zip_entry_name',
+ 'zip_entry_open',
+ 'zip_entry_read',
+ 'zip_open',
+ 'zip_read'),
+ 'Zlib': ('deflate_add',
+ 'deflate_init',
+ 'gzclose',
+ 'gzcompress',
+ 'gzdecode',
+ 'gzdeflate',
+ 'gzencode',
+ 'gzeof',
+ 'gzfile',
+ 'gzgetc',
+ 'gzgets',
+ 'gzgetss',
+ 'gzinflate',
+ 'gzopen',
+ 'gzpassthru',
+ 'gzputs',
+ 'gzread',
+ 'gzrewind',
+ 'gzseek',
+ 'gztell',
+ 'gzuncompress',
+ 'gzwrite',
+ 'inflate_add',
+ 'inflate_get_read_len',
+ 'inflate_get_status',
+ 'inflate_init',
+ 'readgzfile',
+ 'zlib_decode',
+ 'zlib_encode',
+ 'zlib_get_coding_type'),
+ 'ZooKeeper': ('zookeeper_dispatch',),
+ 'cURL': ('curl_close',
+ 'curl_copy_handle',
+ 'curl_errno',
+ 'curl_error',
+ 'curl_escape',
+ 'curl_exec',
+ 'curl_file_create',
+ 'curl_getinfo',
+ 'curl_init',
+ 'curl_multi_add_handle',
+ 'curl_multi_close',
+ 'curl_multi_errno',
+ 'curl_multi_exec',
+ 'curl_multi_getcontent',
+ 'curl_multi_info_read',
+ 'curl_multi_init',
+ 'curl_multi_remove_handle',
+ 'curl_multi_select',
+ 'curl_multi_setopt',
+ 'curl_multi_strerror',
+ 'curl_pause',
+ 'curl_reset',
+ 'curl_setopt_array',
+ 'curl_setopt',
+ 'curl_share_close',
+ 'curl_share_errno',
+ 'curl_share_init',
+ 'curl_share_setopt',
+ 'curl_share_strerror',
+ 'curl_strerror',
+ 'curl_unescape',
+ 'curl_version'),
+ 'dBase': ('dbase_add_record',
+ 'dbase_close',
+ 'dbase_create',
+ 'dbase_delete_record',
+ 'dbase_get_header_info',
+ 'dbase_get_record_with_names',
+ 'dbase_get_record',
+ 'dbase_numfields',
+ 'dbase_numrecords',
+ 'dbase_open',
+ 'dbase_pack',
+ 'dbase_replace_record'),
+ 'iconv': ('iconv_get_encoding',
+ 'iconv_mime_decode_headers',
+ 'iconv_mime_decode',
+ 'iconv_mime_encode',
+ 'iconv_set_encoding',
+ 'iconv_strlen',
+ 'iconv_strpos',
+ 'iconv_strrpos',
+ 'iconv_substr',
+ 'iconv',
+ 'ob_iconv_handler'),
+ 'intl': ('intl_error_name',
+ 'intl_get_error_code',
+ 'intl_get_error_message',
+ 'intl_is_failure'),
+ 'libxml': ('libxml_clear_errors',
+ 'libxml_disable_entity_loader',
+ 'libxml_get_errors',
+ 'libxml_get_last_error',
+ 'libxml_set_external_entity_loader',
+ 'libxml_set_streams_context',
+ 'libxml_use_internal_errors'),
+ 'mqseries': ('mqseries_back',
+ 'mqseries_begin',
+ 'mqseries_close',
+ 'mqseries_cmit',
+ 'mqseries_conn',
+ 'mqseries_connx',
+ 'mqseries_disc',
+ 'mqseries_get',
+ 'mqseries_inq',
+ 'mqseries_open',
+ 'mqseries_put1',
+ 'mqseries_put',
+ 'mqseries_set',
+ 'mqseries_strerror'),
+ 'phpdbg': ('phpdbg_break_file',
+ 'phpdbg_break_function',
+ 'phpdbg_break_method',
+ 'phpdbg_break_next',
+ 'phpdbg_clear',
+ 'phpdbg_color',
+ 'phpdbg_end_oplog',
+ 'phpdbg_exec',
+ 'phpdbg_get_executable',
+ 'phpdbg_prompt',
+ 'phpdbg_start_oplog'),
+ 'runkit7': ('runkit7_constant_add',
+ 'runkit7_constant_redefine',
+ 'runkit7_constant_remove',
+ 'runkit7_function_add',
+ 'runkit7_function_copy',
+ 'runkit7_function_redefine',
+ 'runkit7_function_remove',
+ 'runkit7_function_rename',
+ 'runkit7_import',
+ 'runkit7_method_add',
+ 'runkit7_method_copy',
+ 'runkit7_method_redefine',
+ 'runkit7_method_remove',
+ 'runkit7_method_rename',
+ 'runkit7_object_id',
+ 'runkit7_superglobals',
+ 'runkit7_zval_inspect'),
+ 'ssdeep': ('ssdeep_fuzzy_compare',
+ 'ssdeep_fuzzy_hash_filename',
+ 'ssdeep_fuzzy_hash'),
+ 'var_representation': ('var_representation',),
+ 'win32service': ('win32_continue_service',
+ 'win32_create_service',
+ 'win32_delete_service',
+ 'win32_get_last_control_message',
+ 'win32_pause_service',
+ 'win32_query_service_status',
+ 'win32_send_custom_control',
+ 'win32_set_service_exit_code',
+ 'win32_set_service_exit_mode',
+ 'win32_set_service_status',
+ 'win32_start_service_ctrl_dispatcher',
+ 'win32_start_service',
+ 'win32_stop_service'),
+ 'xattr': ('xattr_get',
+ 'xattr_list',
+ 'xattr_remove',
+ 'xattr_set',
+ 'xattr_supported'),
+ 'xdiff': ('xdiff_file_bdiff_size',
+ 'xdiff_file_bdiff',
+ 'xdiff_file_bpatch',
+ 'xdiff_file_diff_binary',
+ 'xdiff_file_diff',
+ 'xdiff_file_merge3',
+ 'xdiff_file_patch_binary',
+ 'xdiff_file_patch',
+ 'xdiff_file_rabdiff',
+ 'xdiff_string_bdiff_size',
+ 'xdiff_string_bdiff',
+ 'xdiff_string_bpatch',
+ 'xdiff_string_diff_binary',
+ 'xdiff_string_diff',
+ 'xdiff_string_merge3',
+ 'xdiff_string_patch_binary',
+ 'xdiff_string_patch',
+ 'xdiff_string_rabdiff')}
+
+if __name__ == '__main__': # pragma: no cover
+ import glob
+ import os
+ import pprint
+ import re
+ import shutil
+ import tarfile
+ from urllib.request import urlretrieve
+
+ PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
+ PHP_MANUAL_DIR = './php-chunked-xhtml/'
+ PHP_REFERENCE_GLOB = 'ref.*'
+ PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>'
+ PHP_MODULE_RE = '<title>(.*?) Functions</title>'
+
+ def get_php_functions():
+ function_re = re.compile(PHP_FUNCTION_RE)
+ module_re = re.compile(PHP_MODULE_RE)
+ modules = {}
+
+ for file in get_php_references():
+ module = ''
+ with open(file) as f:
+ for line in f:
+ if not module:
+ search = module_re.search(line)
+ if search:
+ module = search.group(1)
+ modules[module] = []
+
+ elif 'href="function.' in line:
+ for match in function_re.finditer(line):
+ fn = match.group(1)
+ if '»' not in fn and '«' not in fn and \
+ '::' not in fn and '\\' not in fn and \
+ fn not in modules[module]:
+ modules[module].append(fn)
+
+ if module:
+ # These are dummy manual pages, not actual functions
+ if module == 'Filesystem':
+ modules[module].remove('delete')
+
+ if not modules[module]:
+ del modules[module]
+
+ for key in modules:
+ modules[key] = tuple(modules[key])
+ return modules
+
+ def get_php_references():
+ download = urlretrieve(PHP_MANUAL_URL)
+ with tarfile.open(download[0]) as tar:
+ tar.extractall()
+ yield from glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB))
+ os.remove(download[0])
+
+ def regenerate(filename, modules):
+ with open(filename) as fp:
+ content = fp.read()
+
+ header = content[:content.find('MODULES = {')]
+ footer = content[content.find("if __name__ == '__main__':"):]
+
+ with open(filename, 'w') as fp:
+ fp.write(header)
+ fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
+ fp.write(footer)
+
+ def run():
+ print('>> Downloading Function Index')
+ modules = get_php_functions()
+ total = sum(len(v) for v in modules.values())
+ print('%d functions found' % total)
+ regenerate(__file__, modules)
+ shutil.rmtree(PHP_MANUAL_DIR)
+
+ run()
diff --git a/pygments/lexers/_postgres_builtins.py b/pygments/lexers/_postgres_builtins.py
new file mode 100644
index 0000000..3305f84
--- /dev/null
+++ b/pygments/lexers/_postgres_builtins.py
@@ -0,0 +1,684 @@
+"""
+ pygments.lexers._postgres_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Self-updating data files for PostgreSQL lexer.
+
+ Run with `python -I` to update itself.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Autogenerated: please edit them if you like wasting your time.
+
+KEYWORDS = (
+ 'ABORT',
+ 'ABSOLUTE',
+ 'ACCESS',
+ 'ACTION',
+ 'ADD',
+ 'ADMIN',
+ 'AFTER',
+ 'AGGREGATE',
+ 'ALL',
+ 'ALSO',
+ 'ALTER',
+ 'ALWAYS',
+ 'ANALYSE',
+ 'ANALYZE',
+ 'AND',
+ 'ANY',
+ 'ARRAY',
+ 'AS',
+ 'ASC',
+ 'ASENSITIVE',
+ 'ASSERTION',
+ 'ASSIGNMENT',
+ 'ASYMMETRIC',
+ 'AT',
+ 'ATOMIC',
+ 'ATTACH',
+ 'ATTRIBUTE',
+ 'AUTHORIZATION',
+ 'BACKWARD',
+ 'BEFORE',
+ 'BEGIN',
+ 'BETWEEN',
+ 'BIGINT',
+ 'BINARY',
+ 'BIT',
+ 'BOOLEAN',
+ 'BOTH',
+ 'BREADTH',
+ 'BY',
+ 'CACHE',
+ 'CALL',
+ 'CALLED',
+ 'CASCADE',
+ 'CASCADED',
+ 'CASE',
+ 'CAST',
+ 'CATALOG',
+ 'CHAIN',
+ 'CHAR',
+ 'CHARACTER',
+ 'CHARACTERISTICS',
+ 'CHECK',
+ 'CHECKPOINT',
+ 'CLASS',
+ 'CLOSE',
+ 'CLUSTER',
+ 'COALESCE',
+ 'COLLATE',
+ 'COLLATION',
+ 'COLUMN',
+ 'COLUMNS',
+ 'COMMENT',
+ 'COMMENTS',
+ 'COMMIT',
+ 'COMMITTED',
+ 'COMPRESSION',
+ 'CONCURRENTLY',
+ 'CONFIGURATION',
+ 'CONFLICT',
+ 'CONNECTION',
+ 'CONSTRAINT',
+ 'CONSTRAINTS',
+ 'CONTENT',
+ 'CONTINUE',
+ 'CONVERSION',
+ 'COPY',
+ 'COST',
+ 'CREATE',
+ 'CROSS',
+ 'CSV',
+ 'CUBE',
+ 'CURRENT',
+ 'CURRENT_CATALOG',
+ 'CURRENT_DATE',
+ 'CURRENT_ROLE',
+ 'CURRENT_SCHEMA',
+ 'CURRENT_TIME',
+ 'CURRENT_TIMESTAMP',
+ 'CURRENT_USER',
+ 'CURSOR',
+ 'CYCLE',
+ 'DATA',
+ 'DATABASE',
+ 'DAY',
+ 'DEALLOCATE',
+ 'DEC',
+ 'DECIMAL',
+ 'DECLARE',
+ 'DEFAULT',
+ 'DEFAULTS',
+ 'DEFERRABLE',
+ 'DEFERRED',
+ 'DEFINER',
+ 'DELETE',
+ 'DELIMITER',
+ 'DELIMITERS',
+ 'DEPENDS',
+ 'DEPTH',
+ 'DESC',
+ 'DETACH',
+ 'DICTIONARY',
+ 'DISABLE',
+ 'DISCARD',
+ 'DISTINCT',
+ 'DO',
+ 'DOCUMENT',
+ 'DOMAIN',
+ 'DOUBLE',
+ 'DROP',
+ 'EACH',
+ 'ELSE',
+ 'ENABLE',
+ 'ENCODING',
+ 'ENCRYPTED',
+ 'END',
+ 'ENUM',
+ 'ESCAPE',
+ 'EVENT',
+ 'EXCEPT',
+ 'EXCLUDE',
+ 'EXCLUDING',
+ 'EXCLUSIVE',
+ 'EXECUTE',
+ 'EXISTS',
+ 'EXPLAIN',
+ 'EXPRESSION',
+ 'EXTENSION',
+ 'EXTERNAL',
+ 'EXTRACT',
+ 'FALSE',
+ 'FAMILY',
+ 'FETCH',
+ 'FILTER',
+ 'FINALIZE',
+ 'FIRST',
+ 'FLOAT',
+ 'FOLLOWING',
+ 'FOR',
+ 'FORCE',
+ 'FOREIGN',
+ 'FORWARD',
+ 'FREEZE',
+ 'FROM',
+ 'FULL',
+ 'FUNCTION',
+ 'FUNCTIONS',
+ 'GENERATED',
+ 'GLOBAL',
+ 'GRANT',
+ 'GRANTED',
+ 'GREATEST',
+ 'GROUP',
+ 'GROUPING',
+ 'GROUPS',
+ 'HANDLER',
+ 'HAVING',
+ 'HEADER',
+ 'HOLD',
+ 'HOUR',
+ 'IDENTITY',
+ 'IF',
+ 'ILIKE',
+ 'IMMEDIATE',
+ 'IMMUTABLE',
+ 'IMPLICIT',
+ 'IMPORT',
+ 'IN',
+ 'INCLUDE',
+ 'INCLUDING',
+ 'INCREMENT',
+ 'INDEX',
+ 'INDEXES',
+ 'INHERIT',
+ 'INHERITS',
+ 'INITIALLY',
+ 'INLINE',
+ 'INNER',
+ 'INOUT',
+ 'INPUT',
+ 'INSENSITIVE',
+ 'INSERT',
+ 'INSTEAD',
+ 'INT',
+ 'INTEGER',
+ 'INTERSECT',
+ 'INTERVAL',
+ 'INTO',
+ 'INVOKER',
+ 'IS',
+ 'ISNULL',
+ 'ISOLATION',
+ 'JOIN',
+ 'KEY',
+ 'LABEL',
+ 'LANGUAGE',
+ 'LARGE',
+ 'LAST',
+ 'LATERAL',
+ 'LEADING',
+ 'LEAKPROOF',
+ 'LEAST',
+ 'LEFT',
+ 'LEVEL',
+ 'LIKE',
+ 'LIMIT',
+ 'LISTEN',
+ 'LOAD',
+ 'LOCAL',
+ 'LOCALTIME',
+ 'LOCALTIMESTAMP',
+ 'LOCATION',
+ 'LOCK',
+ 'LOCKED',
+ 'LOGGED',
+ 'MAPPING',
+ 'MATCH',
+ 'MATERIALIZED',
+ 'MAXVALUE',
+ 'METHOD',
+ 'MINUTE',
+ 'MINVALUE',
+ 'MODE',
+ 'MONTH',
+ 'MOVE',
+ 'NAME',
+ 'NAMES',
+ 'NATIONAL',
+ 'NATURAL',
+ 'NCHAR',
+ 'NEW',
+ 'NEXT',
+ 'NFC',
+ 'NFD',
+ 'NFKC',
+ 'NFKD',
+ 'NO',
+ 'NONE',
+ 'NORMALIZE',
+ 'NORMALIZED',
+ 'NOT',
+ 'NOTHING',
+ 'NOTIFY',
+ 'NOTNULL',
+ 'NOWAIT',
+ 'NULL',
+ 'NULLIF',
+ 'NULLS',
+ 'NUMERIC',
+ 'OBJECT',
+ 'OF',
+ 'OFF',
+ 'OFFSET',
+ 'OIDS',
+ 'OLD',
+ 'ON',
+ 'ONLY',
+ 'OPERATOR',
+ 'OPTION',
+ 'OPTIONS',
+ 'OR',
+ 'ORDER',
+ 'ORDINALITY',
+ 'OTHERS',
+ 'OUT',
+ 'OUTER',
+ 'OVER',
+ 'OVERLAPS',
+ 'OVERLAY',
+ 'OVERRIDING',
+ 'OWNED',
+ 'OWNER',
+ 'PARALLEL',
+ 'PARSER',
+ 'PARTIAL',
+ 'PARTITION',
+ 'PASSING',
+ 'PASSWORD',
+ 'PLACING',
+ 'PLANS',
+ 'POLICY',
+ 'POSITION',
+ 'PRECEDING',
+ 'PRECISION',
+ 'PREPARE',
+ 'PREPARED',
+ 'PRESERVE',
+ 'PRIMARY',
+ 'PRIOR',
+ 'PRIVILEGES',
+ 'PROCEDURAL',
+ 'PROCEDURE',
+ 'PROCEDURES',
+ 'PROGRAM',
+ 'PUBLICATION',
+ 'QUOTE',
+ 'RANGE',
+ 'READ',
+ 'REAL',
+ 'REASSIGN',
+ 'RECHECK',
+ 'RECURSIVE',
+ 'REF',
+ 'REFERENCES',
+ 'REFERENCING',
+ 'REFRESH',
+ 'REINDEX',
+ 'RELATIVE',
+ 'RELEASE',
+ 'RENAME',
+ 'REPEATABLE',
+ 'REPLACE',
+ 'REPLICA',
+ 'RESET',
+ 'RESTART',
+ 'RESTRICT',
+ 'RETURN',
+ 'RETURNING',
+ 'RETURNS',
+ 'REVOKE',
+ 'RIGHT',
+ 'ROLE',
+ 'ROLLBACK',
+ 'ROLLUP',
+ 'ROUTINE',
+ 'ROUTINES',
+ 'ROW',
+ 'ROWS',
+ 'RULE',
+ 'SAVEPOINT',
+ 'SCHEMA',
+ 'SCHEMAS',
+ 'SCROLL',
+ 'SEARCH',
+ 'SECOND',
+ 'SECURITY',
+ 'SELECT',
+ 'SEQUENCE',
+ 'SEQUENCES',
+ 'SERIALIZABLE',
+ 'SERVER',
+ 'SESSION',
+ 'SESSION_USER',
+ 'SET',
+ 'SETOF',
+ 'SETS',
+ 'SHARE',
+ 'SHOW',
+ 'SIMILAR',
+ 'SIMPLE',
+ 'SKIP',
+ 'SMALLINT',
+ 'SNAPSHOT',
+ 'SOME',
+ 'SQL',
+ 'STABLE',
+ 'STANDALONE',
+ 'START',
+ 'STATEMENT',
+ 'STATISTICS',
+ 'STDIN',
+ 'STDOUT',
+ 'STORAGE',
+ 'STORED',
+ 'STRICT',
+ 'STRIP',
+ 'SUBSCRIPTION',
+ 'SUBSTRING',
+ 'SUPPORT',
+ 'SYMMETRIC',
+ 'SYSID',
+ 'SYSTEM',
+ 'TABLE',
+ 'TABLES',
+ 'TABLESAMPLE',
+ 'TABLESPACE',
+ 'TEMP',
+ 'TEMPLATE',
+ 'TEMPORARY',
+ 'TEXT',
+ 'THEN',
+ 'TIES',
+ 'TIME',
+ 'TIMESTAMP',
+ 'TO',
+ 'TRAILING',
+ 'TRANSACTION',
+ 'TRANSFORM',
+ 'TREAT',
+ 'TRIGGER',
+ 'TRIM',
+ 'TRUE',
+ 'TRUNCATE',
+ 'TRUSTED',
+ 'TYPE',
+ 'TYPES',
+ 'UESCAPE',
+ 'UNBOUNDED',
+ 'UNCOMMITTED',
+ 'UNENCRYPTED',
+ 'UNION',
+ 'UNIQUE',
+ 'UNKNOWN',
+ 'UNLISTEN',
+ 'UNLOGGED',
+ 'UNTIL',
+ 'UPDATE',
+ 'USER',
+ 'USING',
+ 'VACUUM',
+ 'VALID',
+ 'VALIDATE',
+ 'VALIDATOR',
+ 'VALUE',
+ 'VALUES',
+ 'VARCHAR',
+ 'VARIADIC',
+ 'VARYING',
+ 'VERBOSE',
+ 'VERSION',
+ 'VIEW',
+ 'VIEWS',
+ 'VOLATILE',
+ 'WHEN',
+ 'WHERE',
+ 'WHITESPACE',
+ 'WINDOW',
+ 'WITH',
+ 'WITHIN',
+ 'WITHOUT',
+ 'WORK',
+ 'WRAPPER',
+ 'WRITE',
+ 'XML',
+ 'XMLATTRIBUTES',
+ 'XMLCONCAT',
+ 'XMLELEMENT',
+ 'XMLEXISTS',
+ 'XMLFOREST',
+ 'XMLNAMESPACES',
+ 'XMLPARSE',
+ 'XMLPI',
+ 'XMLROOT',
+ 'XMLSERIALIZE',
+ 'XMLTABLE',
+ 'YEAR',
+ 'YES',
+ 'ZONE',
+)
+
+DATATYPES = (
+ 'bigint',
+ 'bigserial',
+ 'bit',
+ 'bit varying',
+ 'bool',
+ 'boolean',
+ 'box',
+ 'bytea',
+ 'char',
+ 'character',
+ 'character varying',
+ 'cidr',
+ 'circle',
+ 'date',
+ 'decimal',
+ 'double precision',
+ 'float4',
+ 'float8',
+ 'inet',
+ 'int',
+ 'int2',
+ 'int4',
+ 'int8',
+ 'integer',
+ 'interval',
+ 'json',
+ 'jsonb',
+ 'line',
+ 'lseg',
+ 'macaddr',
+ 'macaddr8',
+ 'money',
+ 'numeric',
+ 'path',
+ 'pg_lsn',
+ 'pg_snapshot',
+ 'point',
+ 'polygon',
+ 'real',
+ 'serial',
+ 'serial2',
+ 'serial4',
+ 'serial8',
+ 'smallint',
+ 'smallserial',
+ 'text',
+ 'time',
+ 'timestamp',
+ 'timestamptz',
+ 'timetz',
+ 'tsquery',
+ 'tsvector',
+ 'txid_snapshot',
+ 'uuid',
+ 'varbit',
+ 'varchar',
+ 'with time zone',
+ 'without time zone',
+ 'xml',
+)
+
+PSEUDO_TYPES = (
+ 'any',
+ 'anyarray',
+ 'anycompatible',
+ 'anycompatiblearray',
+ 'anycompatiblemultirange',
+ 'anycompatiblenonarray',
+ 'anycompatiblerange',
+ 'anyelement',
+ 'anyenum',
+ 'anymultirange',
+ 'anynonarray',
+ 'anyrange',
+ 'cstring',
+ 'event_trigger',
+ 'fdw_handler',
+ 'index_am_handler',
+ 'internal',
+ 'language_handler',
+ 'pg_ddl_command',
+ 'record',
+ 'table_am_handler',
+ 'trigger',
+ 'tsm_handler',
+ 'unknown',
+ 'void',
+)
+
+# Drop pseudo types that collide with keywords (e.g. 'trigger', 'unknown')
+PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
+
+PLPGSQL_KEYWORDS = (
+ 'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
+ 'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
+ 'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
+)
+
+
+if __name__ == '__main__': # pragma: no cover
+ import re
+ from urllib.request import urlopen
+
+ from pygments.util import format_lines
+
+ # One man's constant is another man's variable.
+ SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
+ KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h'
+ DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'
+
+ def update_myself():
+ content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore')
+ data_file = list(content.splitlines())
+ datatypes = parse_datatypes(data_file)
+ pseudos = parse_pseudos(data_file)
+
+ content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore')
+ keywords = parse_keywords(content)
+
+ update_consts(__file__, 'DATATYPES', datatypes)
+ update_consts(__file__, 'PSEUDO_TYPES', pseudos)
+ update_consts(__file__, 'KEYWORDS', keywords)
+
+ def parse_keywords(f):
+ kw = []
+ for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f):
+ kw.append(m.group(1).upper())
+
+ if not kw:
+ raise ValueError('no keyword found')
+
+ kw.sort()
+ return kw
+
+ def parse_datatypes(f):
+ dt = set()
+ for line in f:
+ if '<sect1' in line:
+ break
+ if '<entry><type>' not in line:
+ continue
+
+ # Parse a string such as
+ # time [ (<replaceable>p</replaceable>) ] [ without time zone ]
+ # into types "time" and "without time zone"
+
+ # remove all the tags
+ line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
+ line = re.sub("<[^>]+>", "", line)
+
+ # Drop the parts containing braces
+ for tmp in [t for tmp in line.split('[')
+ for t in tmp.split(']') if "(" not in t]:
+ for t in tmp.split(','):
+ t = t.strip()
+ if not t: continue
+ dt.add(" ".join(t.split()))
+
+ dt = list(dt)
+ dt.sort()
+ return dt
+
+ def parse_pseudos(f):
+ dt = []
+ re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
+ re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>')
+ re_end = re.compile(r'\s*</table>')
+
+ f = iter(f)
+ for line in f:
+ if re_start.match(line) is not None:
+ break
+ else:
+ raise ValueError('pseudo datatypes table not found')
+
+ for line in f:
+ m = re_entry.match(line)
+ if m is not None:
+ dt.append(m.group(1))
+
+ if re_end.match(line) is not None:
+ break
+ else:
+ raise ValueError('end of pseudo datatypes table not found')
+
+ if not dt:
+ raise ValueError('pseudo datatypes not found')
+
+ dt.sort()
+ return dt
+
+ def update_consts(filename, constname, content):
+ with open(filename) as f:
+ data = f.read()
+
+ # Line to start/end inserting
+ re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
+ m = re_match.search(data)
+ if not m:
+ raise ValueError('Could not find existing definition for %s' %
+ (constname,))
+
+ new_block = format_lines(constname, content)
+ data = data[:m.start()] + new_block + data[m.end():]
+
+ with open(filename, 'w', newline='\n') as f:
+ f.write(data)
+
+ update_myself()
diff --git a/pygments/lexers/_qlik_builtins.py b/pygments/lexers/_qlik_builtins.py
new file mode 100644
index 0000000..d2759b1
--- /dev/null
+++ b/pygments/lexers/_qlik_builtins.py
@@ -0,0 +1,666 @@
+"""
+ pygments.lexers._qlik_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Qlik builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# operators
+# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/Operators/operators.htm
+OPERATORS_LIST = {
+ "words": [
+ # Bit operators
+ "bitnot",
+ "bitand",
+ "bitor",
+ "bitxor",
+ # Logical operators
+ "and",
+ "or",
+ "not",
+ "xor",
+ # Relational operators
+ "precedes",
+ "follows",
+ # String operators
+ "like",
+ ],
+ "symbols": [
+ # Bit operators
+ ">>",
+ "<<",
+ # Logical operators
+ # Numeric operators
+ "+",
+ "-",
+ "/",
+ "*",
+ # Relational operators
+ "<",
+ "<=",
+ ">",
+ ">=",
+ "=",
+ "<>",
+ # String operators
+ "&",
+ ],
+}
+
+# SCRIPT STATEMENTS
+# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/
+STATEMENT_LIST = [
+ # control statements
+ "for",
+ "each",
+ "in",
+ "next",
+ "do",
+ "while",
+ "until",
+ "unless",
+ "loop",
+ "return",
+ "switch",
+ "case",
+ "default",
+ "if",
+ "else",
+ "endif",
+ "then",
+ "end",
+ "exit",
+ "script",
+ "switch",
+ # prefixes
+ "Add",
+ "Buffer",
+ "Concatenate",
+ "Crosstable",
+ "First",
+ "Generic",
+ "Hierarchy",
+ "HierarchyBelongsTo",
+ "Inner",
+ "IntervalMatch",
+ "Join",
+ "Keep",
+ "Left",
+ "Mapping",
+ "Merge",
+ "NoConcatenate",
+ "Outer",
+ "Partial reload",
+ "Replace",
+ "Right",
+ "Sample",
+ "Semantic",
+ "Unless",
+ "When",
+ # regular statements
+ "Alias", # alias ... as ...
+ "as",
+ "AutoNumber",
+ "Binary",
+ "Comment field", # comment fields ... using ...
+ "Comment fields", # comment field ... with ...
+ "using",
+ "with",
+ "Comment table", # comment table ... with ...
+ "Comment tables", # comment tables ... using ...
+ "Connect",
+ "ODBC", # ODBC CONNECT TO ...
+ "OLEBD", # OLEDB CONNECT TO ...
+ "CUSTOM", # CUSTOM CONNECT TO ...
+ "LIB", # LIB CONNECT TO ...
+ "Declare",
+ "Derive",
+ "From",
+ "explicit",
+ "implicit",
+ "Direct Query",
+ "dimension",
+ "measure",
+ "Directory",
+ "Disconnect",
+ "Drop field",
+ "Drop fields",
+ "Drop table",
+ "Drop tables",
+ "Execute",
+ "FlushLog",
+ "Force",
+ "capitalization",
+ "case upper",
+ "case lower",
+ "case mixed",
+ "Load",
+ "distinct",
+ "from",
+ "inline",
+ "resident",
+ "from_field",
+ "autogenerate",
+ "extension",
+ "where",
+ "group by",
+ "order by",
+ "asc",
+ "desc",
+ "Let",
+ "Loosen Table",
+ "Map",
+ "NullAsNull",
+ "NullAsValue",
+ "Qualify",
+ "Rem",
+ "Rename field",
+ "Rename fields",
+ "Rename table",
+ "Rename tables",
+ "Search",
+ "include",
+ "exclude",
+ "Section",
+ "access",
+ "application",
+ "Select",
+ "Set",
+ "Sleep",
+ "SQL",
+ "SQLColumns",
+ "SQLTables",
+ "SQLTypes",
+ "Star",
+ "Store",
+ "Tag",
+ "Trace",
+ "Unmap",
+ "Unqualify",
+ "Untag",
+ # Qualifiers
+ "total",
+]
+
+# Script functions
+# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/functions-in-scripts-chart-expressions.htm
+SCRIPT_FUNCTIONS = [
+ # Basic aggregation functions in the data load script
+ "FirstSortedValue",
+ "Max",
+ "Min",
+ "Mode",
+ "Only",
+ "Sum",
+ # Counter aggregation functions in the data load script
+ "Count",
+ "MissingCount",
+ "NullCount",
+ "NumericCount",
+ "TextCount",
+ # Financial aggregation functions in the data load script
+ "IRR",
+ "XIRR",
+ "NPV",
+ "XNPV",
+ # Statistical aggregation functions in the data load script
+ "Avg",
+ "Correl",
+ "Fractile",
+ "FractileExc",
+ "Kurtosis",
+ "LINEST_B" "LINEST_df",
+ "LINEST_f",
+ "LINEST_m",
+ "LINEST_r2",
+ "LINEST_seb",
+ "LINEST_sem",
+ "LINEST_sey",
+ "LINEST_ssreg",
+ "Linest_ssresid",
+ "Median",
+ "Skew",
+ "Stdev",
+ "Sterr",
+ "STEYX",
+ # Statistical test functions
+ "Chi2Test_chi2",
+ "Chi2Test_df",
+ "Chi2Test_p",
+ # Two independent samples t-tests
+ "ttest_conf",
+ "ttest_df",
+ "ttest_dif",
+ "ttest_lower",
+ "ttest_sig",
+ "ttest_sterr",
+ "ttest_t",
+ "ttest_upper",
+ # Two independent weighted samples t-tests
+ "ttestw_conf",
+ "ttestw_df",
+ "ttestw_dif",
+ "ttestw_lower",
+ "ttestw_sig",
+ "ttestw_sterr",
+ "ttestw_t",
+ "ttestw_upper",
+ # One sample t-tests
+ "ttest1_conf",
+ "ttest1_df",
+ "ttest1_dif",
+ "ttest1_lower",
+ "ttest1_sig",
+ "ttest1_sterr",
+ "ttest1_t",
+ "ttest1_upper",
+ # One weighted sample t-tests
+ "ttest1w_conf",
+ "ttest1w_df",
+ "ttest1w_dif",
+ "ttest1w_lower",
+ "ttest1w_sig",
+ "ttest1w_sterr",
+ "ttest1w_t",
+ "ttest1w_upper",
+ # One column format functions
+ "ztest_conf",
+ "ztest_dif",
+ "ztest_sig",
+ "ztest_sterr",
+ "ztest_z",
+ "ztest_lower",
+ "ztest_upper",
+ # Weighted two-column format functions
+ "ztestw_conf",
+ "ztestw_dif",
+ "ztestw_lower",
+ "ztestw_sig",
+ "ztestw_sterr",
+ "ztestw_upper",
+ "ztestw_z",
+ # String aggregation functions in the data load script
+ "Concat",
+ "FirstValue",
+ "LastValue",
+ "MaxString",
+ "MinString",
+ # Synthetic dimension functions
+ "ValueList",
+ "ValueLoop",
+ # Color functions
+ "ARGB",
+ "HSL",
+ "RGB",
+ "Color",
+ "Colormix1",
+ "Colormix2",
+ "SysColor",
+ "ColorMapHue",
+ "ColorMapJet",
+ "black",
+ "blue",
+ "brown",
+ "cyan",
+ "darkgray",
+ "green",
+ "lightblue",
+ "lightcyan",
+ "lightgray",
+ "lightgreen",
+ "lightmagenta",
+ "lightred",
+ "magenta",
+ "red",
+ "white",
+ "yellow",
+ # Conditional functions
+ "alt",
+ "class",
+ "coalesce",
+ "if",
+ "match",
+ "mixmatch",
+ "pick",
+ "wildmatch",
+ # Counter functions
+ "autonumber",
+ "autonumberhash128",
+ "autonumberhash256",
+ "IterNo",
+ "RecNo",
+ "RowNo",
+ # Integer expressions of time
+ "second",
+ "minute",
+ "hour",
+ "day",
+ "week",
+ "month",
+ "year",
+ "weekyear",
+ "weekday",
+ # Timestamp functions
+ "now",
+ "today",
+ "LocalTime",
+ # Make functions
+ "makedate",
+ "makeweekdate",
+ "maketime",
+ # Other date functions
+ "AddMonths",
+ "AddYears",
+ "yeartodate",
+ # Timezone functions
+ "timezone",
+ "GMT",
+ "UTC",
+ "daylightsaving",
+ "converttolocaltime",
+ # Set time functions
+ "setdateyear",
+ "setdateyearmonth",
+ # In... functions
+ "inyear",
+ "inyeartodate",
+ "inquarter",
+ "inquartertodate",
+ "inmonth",
+ "inmonthtodate",
+ "inmonths",
+ "inmonthstodate",
+ "inweek",
+ "inweektodate",
+ "inlunarweek",
+ "inlunarweektodate",
+ "inday",
+ "indaytotime",
+ # Start ... end functions
+ "yearstart",
+ "yearend",
+ "yearname",
+ "quarterstart",
+ "quarterend",
+ "quartername",
+ "monthstart",
+ "monthend",
+ "monthname",
+ "monthsstart",
+ "monthsend",
+ "monthsname",
+ "weekstart",
+ "weekend",
+ "weekname",
+ "lunarweekstart",
+ "lunarweekend",
+ "lunarweekname",
+ "daystart",
+ "dayend",
+ "dayname",
+ # Day numbering functions
+ "age",
+ "networkdays",
+ "firstworkdate",
+ "lastworkdate",
+ "daynumberofyear",
+ "daynumberofquarter",
+ # Exponential and logarithmic
+ "exp",
+ "log",
+ "log10",
+ "pow",
+ "sqr",
+ "sqrt",
+ # Count functions
+ "GetAlternativeCount",
+ "GetExcludedCount",
+ "GetNotSelectedCount",
+ "GetPossibleCount",
+ "GetSelectedCount",
+ # Field and selection functions
+ "GetCurrentSelections",
+ "GetFieldSelections",
+ "GetObjectDimension",
+ "GetObjectField",
+ "GetObjectMeasure",
+ # File functions
+ "Attribute",
+ "ConnectString",
+ "FileBaseName",
+ "FileDir",
+ "FileExtension",
+ "FileName",
+ "FilePath",
+ "FileSize",
+ "FileTime",
+ "GetFolderPath",
+ "QvdCreateTime",
+ "QvdFieldName",
+ "QvdNoOfFields",
+ "QvdNoOfRecords",
+ "QvdTableName",
+ # Financial functions
+ "FV",
+ "nPer",
+ "Pmt",
+ "PV",
+ "Rate",
+ # Formatting functions
+ "ApplyCodepage",
+ "Date",
+ "Dual",
+ "Interval",
+ "Money",
+ "Num",
+ "Time",
+ "Timestamp",
+ # General numeric functions
+ "bitcount",
+ "div",
+ "fabs",
+ "fact",
+ "frac",
+ "sign",
+ # Combination and permutation functions
+ "combin",
+ "permut",
+ # Modulo functions
+ "fmod",
+ "mod",
+ # Parity functions
+ "even",
+ "odd",
+ # Rounding functions
+ "ceil",
+ "floor",
+ "round",
+ # Geospatial functions
+ "GeoAggrGeometry",
+ "GeoBoundingBox",
+ "GeoCountVertex",
+ "GeoInvProjectGeometry",
+ "GeoProjectGeometry",
+ "GeoReduceGeometry",
+ "GeoGetBoundingBox",
+ "GeoGetPolygonCenter",
+ "GeoMakePoint",
+ "GeoProject",
+ # Interpretation functions
+ "Date#",
+ "Interval#",
+ "Money#",
+ "Num#",
+ "Text",
+ "Time#",
+ "Timestamp#",
+ # Field functions
+ "FieldIndex",
+ "FieldValue",
+ "FieldValueCount",
+ # Inter-record functions in the data load script
+ "Exists",
+ "LookUp",
+ "Peek",
+ "Previous",
+ # Logical functions
+ "IsNum",
+ "IsText",
+ # Mapping functions
+ "ApplyMap",
+ "MapSubstring",
+ # Mathematical functions
+ "e",
+ "false",
+ "pi",
+ "rand",
+ "true",
+ # NULL functions
+ "EmptyIsNull",
+ "IsNull",
+ "Null",
+ # Basic range functions
+ "RangeMax",
+ "RangeMaxString",
+ "RangeMin",
+ "RangeMinString",
+ "RangeMode",
+ "RangeOnly",
+ "RangeSum",
+ # Counter range functions
+ "RangeCount",
+ "RangeMissingCount",
+ "RangeNullCount",
+ "RangeNumericCount",
+ "RangeTextCount",
+ # Statistical range functions
+ "RangeAvg",
+ "RangeCorrel",
+ "RangeFractile",
+ "RangeKurtosis",
+ "RangeSkew",
+ "RangeStdev",
+ # Financial range functions
+ "RangeIRR",
+ "RangeNPV",
+ "RangeXIRR",
+ "RangeXNPV",
+ # Statistical distribution
+ "CHIDIST",
+ "CHIINV",
+ "NORMDIST",
+ "NORMINV",
+ "TDIST",
+ "TINV",
+ "FDIST",
+ "FINV",
+ # String functions
+ "Capitalize",
+ "Chr",
+ "Evaluate",
+ "FindOneOf",
+ "Hash128",
+ "Hash160",
+ "Hash256",
+ "Index",
+ "KeepChar",
+ "Left",
+ "Len",
+ "LevenshteinDist",
+ "Lower",
+ "LTrim",
+ "Mid",
+ "Ord",
+ "PurgeChar",
+ "Repeat",
+ "Replace",
+ "Right",
+ "RTrim",
+ "SubField",
+ "SubStringCount",
+ "TextBetween",
+ "Trim",
+ "Upper",
+ # System functions
+ "Author",
+ "ClientPlatform",
+ "ComputerName",
+ "DocumentName",
+ "DocumentPath",
+ "DocumentTitle",
+ "EngineVersion",
+ "GetCollationLocale",
+ "GetObjectField",
+ "GetRegistryString",
+ "IsPartialReload",
+ "OSUser",
+ "ProductVersion",
+ "ReloadTime",
+ "StateName",
+ # Table functions
+ "FieldName",
+ "FieldNumber",
+ "NoOfFields",
+ "NoOfRows",
+ "NoOfTables",
+ "TableName",
+ "TableNumber",
+]
+
+# System variables and constants
+# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/work-with-variables-in-data-load-editor.htm
+CONSTANT_LIST = [
+ # System Variables
+ "floppy",
+ "cd",
+ "include",
+ "must_include",
+ "hideprefix",
+ "hidesuffix",
+ "qvpath",
+ "qvroot",
+ "QvWorkPath",
+ "QvWorkRoot",
+ "StripComments",
+ "Verbatim",
+ "OpenUrlTimeout",
+ "WinPath",
+ "WinRoot",
+ "CollationLocale",
+ "CreateSearchIndexOnReload",
+ # value handling variables
+ "NullDisplay",
+ "NullInterpret",
+ "NullValue",
+ "OtherSymbol",
+ # Currency formatting
+ "MoneyDecimalSep",
+ "MoneyFormat",
+ "MoneyThousandSep",
+ # Number formatting
+ "DecimalSep",
+ "ThousandSep",
+ "NumericalAbbreviation",
+ # Time formatting
+ "DateFormat",
+ "TimeFormat",
+ "TimestampFormat",
+ "MonthNames",
+ "LongMonthNames",
+ "DayNames",
+ "LongDayNames",
+ "FirstWeekDay",
+ "BrokenWeeks",
+ "ReferenceDay",
+ "FirstMonthOfYear",
+ # Error variables
+ "errormode",
+ "scripterror",
+ "scripterrorcount",
+ "scripterrorlist",
+ # Other
+ "null",
+]
diff --git a/pygments/lexers/_scheme_builtins.py b/pygments/lexers/_scheme_builtins.py
new file mode 100644
index 0000000..4453511
--- /dev/null
+++ b/pygments/lexers/_scheme_builtins.py
@@ -0,0 +1,1609 @@
+"""
+ pygments.lexers._scheme_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Scheme builtins.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Autogenerated by external/scheme-builtins-generator.scm
+# using Guile 3.0.5.130-5a1e7.
+
+scheme_keywords = {
+ "*unspecified*",
+ "...",
+ "=>",
+ "@",
+ "@@",
+ "_",
+ "add-to-load-path",
+ "and",
+ "begin",
+ "begin-deprecated",
+ "case",
+ "case-lambda",
+ "case-lambda*",
+ "cond",
+ "cond-expand",
+ "current-filename",
+ "current-source-location",
+ "debug-set!",
+ "define",
+ "define*",
+ "define-inlinable",
+ "define-library",
+ "define-macro",
+ "define-module",
+ "define-once",
+ "define-option-interface",
+ "define-private",
+ "define-public",
+ "define-record-type",
+ "define-syntax",
+ "define-syntax-parameter",
+ "define-syntax-rule",
+ "define-values",
+ "defmacro",
+ "defmacro-public",
+ "delay",
+ "do",
+ "else",
+ "eval-when",
+ "export",
+ "export!",
+ "export-syntax",
+ "false-if-exception",
+ "identifier-syntax",
+ "if",
+ "import",
+ "include",
+ "include-ci",
+ "include-from-path",
+ "include-library-declarations",
+ "lambda",
+ "lambda*",
+ "let",
+ "let*",
+ "let*-values",
+ "let-syntax",
+ "let-values",
+ "letrec",
+ "letrec*",
+ "letrec-syntax",
+ "library",
+ "load",
+ "match",
+ "match-lambda",
+ "match-lambda*",
+ "match-let",
+ "match-let*",
+ "match-letrec",
+ "or",
+ "parameterize",
+ "print-set!",
+ "quasiquote",
+ "quasisyntax",
+ "quote",
+ "quote-syntax",
+ "re-export",
+ "re-export-syntax",
+ "read-set!",
+ "require-extension",
+ "set!",
+ "start-stack",
+ "syntax",
+ "syntax-case",
+ "syntax-error",
+ "syntax-parameterize",
+ "syntax-rules",
+ "unless",
+ "unquote",
+ "unquote-splicing",
+ "unsyntax",
+ "unsyntax-splicing",
+ "use-modules",
+ "when",
+ "while",
+ "with-ellipsis",
+ "with-fluids",
+ "with-syntax",
+ "λ",
+}
+
+scheme_builtins = {
+ "$sc-dispatch",
+ "%char-set-dump",
+ "%get-pre-modules-obarray",
+ "%get-stack-size",
+ "%global-site-dir",
+ "%init-rdelim-builtins",
+ "%init-rw-builtins",
+ "%library-dir",
+ "%load-announce",
+ "%load-hook",
+ "%make-void-port",
+ "%package-data-dir",
+ "%port-property",
+ "%print-module",
+ "%resolve-variable",
+ "%search-load-path",
+ "%set-port-property!",
+ "%site-ccache-dir",
+ "%site-dir",
+ "%start-stack",
+ "%string-dump",
+ "%symbol-dump",
+ "%warn-auto-compilation-enabled",
+ "*",
+ "+",
+ "-",
+ "->bool",
+ "->char-set",
+ "/",
+ "1+",
+ "1-",
+ "<",
+ "<=",
+ "=",
+ ">",
+ ">=",
+ "abort-to-prompt",
+ "abort-to-prompt*",
+ "abs",
+ "absolute-file-name?",
+ "accept",
+ "access?",
+ "acons",
+ "acos",
+ "acosh",
+ "add-hook!",
+ "addrinfo:addr",
+ "addrinfo:canonname",
+ "addrinfo:fam",
+ "addrinfo:flags",
+ "addrinfo:protocol",
+ "addrinfo:socktype",
+ "adjust-port-revealed!",
+ "alarm",
+ "alist-cons",
+ "alist-copy",
+ "alist-delete",
+ "alist-delete!",
+ "allocate-struct",
+ "and-map",
+ "and=>",
+ "angle",
+ "any",
+ "append",
+ "append!",
+ "append-map",
+ "append-map!",
+ "append-reverse",
+ "append-reverse!",
+ "apply",
+ "array->list",
+ "array-cell-ref",
+ "array-cell-set!",
+ "array-contents",
+ "array-copy!",
+ "array-copy-in-order!",
+ "array-dimensions",
+ "array-equal?",
+ "array-fill!",
+ "array-for-each",
+ "array-in-bounds?",
+ "array-index-map!",
+ "array-length",
+ "array-map!",
+ "array-map-in-order!",
+ "array-rank",
+ "array-ref",
+ "array-set!",
+ "array-shape",
+ "array-slice",
+ "array-slice-for-each",
+ "array-slice-for-each-in-order",
+ "array-type",
+ "array-type-code",
+ "array?",
+ "ash",
+ "asin",
+ "asinh",
+ "assert-load-verbosity",
+ "assoc",
+ "assoc-ref",
+ "assoc-remove!",
+ "assoc-set!",
+ "assq",
+ "assq-ref",
+ "assq-remove!",
+ "assq-set!",
+ "assv",
+ "assv-ref",
+ "assv-remove!",
+ "assv-set!",
+ "atan",
+ "atanh",
+ "autoload-done!",
+ "autoload-done-or-in-progress?",
+ "autoload-in-progress!",
+ "backtrace",
+ "basename",
+ "batch-mode?",
+ "beautify-user-module!",
+ "bind",
+ "bind-textdomain-codeset",
+ "bindtextdomain",
+ "bit-count",
+ "bit-count*",
+ "bit-extract",
+ "bit-invert!",
+ "bit-position",
+ "bit-set*!",
+ "bitvector",
+ "bitvector->list",
+ "bitvector-bit-clear?",
+ "bitvector-bit-set?",
+ "bitvector-clear-all-bits!",
+ "bitvector-clear-bit!",
+ "bitvector-clear-bits!",
+ "bitvector-count",
+ "bitvector-count-bits",
+ "bitvector-fill!",
+ "bitvector-flip-all-bits!",
+ "bitvector-length",
+ "bitvector-position",
+ "bitvector-ref",
+ "bitvector-set!",
+ "bitvector-set-all-bits!",
+ "bitvector-set-bit!",
+ "bitvector-set-bits!",
+ "bitvector?",
+ "boolean?",
+ "bound-identifier=?",
+ "break",
+ "break!",
+ "caaaar",
+ "caaadr",
+ "caaar",
+ "caadar",
+ "caaddr",
+ "caadr",
+ "caar",
+ "cadaar",
+ "cadadr",
+ "cadar",
+ "caddar",
+ "cadddr",
+ "caddr",
+ "cadr",
+ "call-with-blocked-asyncs",
+ "call-with-current-continuation",
+ "call-with-deferred-observers",
+ "call-with-include-port",
+ "call-with-input-file",
+ "call-with-input-string",
+ "call-with-module-autoload-lock",
+ "call-with-output-file",
+ "call-with-output-string",
+ "call-with-port",
+ "call-with-prompt",
+ "call-with-unblocked-asyncs",
+ "call-with-values",
+ "call/cc",
+ "canonicalize-path",
+ "car",
+ "car+cdr",
+ "catch",
+ "cdaaar",
+ "cdaadr",
+ "cdaar",
+ "cdadar",
+ "cdaddr",
+ "cdadr",
+ "cdar",
+ "cddaar",
+ "cddadr",
+ "cddar",
+ "cdddar",
+ "cddddr",
+ "cdddr",
+ "cddr",
+ "cdr",
+ "ceiling",
+ "ceiling-quotient",
+ "ceiling-remainder",
+ "ceiling/",
+ "centered-quotient",
+ "centered-remainder",
+ "centered/",
+ "char->integer",
+ "char-alphabetic?",
+ "char-ci<=?",
+ "char-ci<?",
+ "char-ci=?",
+ "char-ci>=?",
+ "char-ci>?",
+ "char-downcase",
+ "char-general-category",
+ "char-is-both?",
+ "char-lower-case?",
+ "char-numeric?",
+ "char-ready?",
+ "char-set",
+ "char-set->list",
+ "char-set->string",
+ "char-set-adjoin",
+ "char-set-adjoin!",
+ "char-set-any",
+ "char-set-complement",
+ "char-set-complement!",
+ "char-set-contains?",
+ "char-set-copy",
+ "char-set-count",
+ "char-set-cursor",
+ "char-set-cursor-next",
+ "char-set-delete",
+ "char-set-delete!",
+ "char-set-diff+intersection",
+ "char-set-diff+intersection!",
+ "char-set-difference",
+ "char-set-difference!",
+ "char-set-every",
+ "char-set-filter",
+ "char-set-filter!",
+ "char-set-fold",
+ "char-set-for-each",
+ "char-set-hash",
+ "char-set-intersection",
+ "char-set-intersection!",
+ "char-set-map",
+ "char-set-ref",
+ "char-set-size",
+ "char-set-unfold",
+ "char-set-unfold!",
+ "char-set-union",
+ "char-set-union!",
+ "char-set-xor",
+ "char-set-xor!",
+ "char-set<=",
+ "char-set=",
+ "char-set?",
+ "char-titlecase",
+ "char-upcase",
+ "char-upper-case?",
+ "char-whitespace?",
+ "char<=?",
+ "char<?",
+ "char=?",
+ "char>=?",
+ "char>?",
+ "char?",
+ "chdir",
+ "chmod",
+ "chown",
+ "chroot",
+ "circular-list",
+ "circular-list?",
+ "close",
+ "close-fdes",
+ "close-input-port",
+ "close-output-port",
+ "close-port",
+ "closedir",
+ "command-line",
+ "complex?",
+ "compose",
+ "concatenate",
+ "concatenate!",
+ "cond-expand-provide",
+ "connect",
+ "cons",
+ "cons*",
+ "cons-source",
+ "const",
+ "convert-assignment",
+ "copy-file",
+ "copy-random-state",
+ "copy-tree",
+ "cos",
+ "cosh",
+ "count",
+ "crypt",
+ "ctermid",
+ "current-dynamic-state",
+ "current-error-port",
+ "current-input-port",
+ "current-language",
+ "current-load-port",
+ "current-module",
+ "current-output-port",
+ "current-time",
+ "current-warning-port",
+ "datum->random-state",
+ "datum->syntax",
+ "debug-disable",
+ "debug-enable",
+ "debug-options",
+ "debug-options-interface",
+ "default-duplicate-binding-handler",
+ "default-duplicate-binding-procedures",
+ "default-prompt-tag",
+ "define!",
+ "define-module*",
+ "defined?",
+ "delete",
+ "delete!",
+ "delete-duplicates",
+ "delete-duplicates!",
+ "delete-file",
+ "delete1!",
+ "delq",
+ "delq!",
+ "delq1!",
+ "delv",
+ "delv!",
+ "delv1!",
+ "denominator",
+ "directory-stream?",
+ "dirname",
+ "display",
+ "display-application",
+ "display-backtrace",
+ "display-error",
+ "dotted-list?",
+ "doubly-weak-hash-table?",
+ "drain-input",
+ "drop",
+ "drop-right",
+ "drop-right!",
+ "drop-while",
+ "dup",
+ "dup->fdes",
+ "dup->inport",
+ "dup->outport",
+ "dup->port",
+ "dup2",
+ "duplicate-port",
+ "dynamic-call",
+ "dynamic-func",
+ "dynamic-link",
+ "dynamic-object?",
+ "dynamic-pointer",
+ "dynamic-state?",
+ "dynamic-unlink",
+ "dynamic-wind",
+ "effective-version",
+ "eighth",
+ "end-of-char-set?",
+ "endgrent",
+ "endhostent",
+ "endnetent",
+ "endprotoent",
+ "endpwent",
+ "endservent",
+ "ensure-batch-mode!",
+ "environ",
+ "eof-object?",
+ "eq?",
+ "equal?",
+ "eqv?",
+ "error",
+ "euclidean-quotient",
+ "euclidean-remainder",
+ "euclidean/",
+ "eval",
+ "eval-string",
+ "even?",
+ "every",
+ "exact->inexact",
+ "exact-integer-sqrt",
+ "exact-integer?",
+ "exact?",
+ "exception-accessor",
+ "exception-args",
+ "exception-kind",
+ "exception-predicate",
+ "exception-type?",
+ "exception?",
+ "execl",
+ "execle",
+ "execlp",
+ "exit",
+ "exp",
+ "expt",
+ "f32vector",
+ "f32vector->list",
+ "f32vector-length",
+ "f32vector-ref",
+ "f32vector-set!",
+ "f32vector?",
+ "f64vector",
+ "f64vector->list",
+ "f64vector-length",
+ "f64vector-ref",
+ "f64vector-set!",
+ "f64vector?",
+ "fcntl",
+ "fdes->inport",
+ "fdes->outport",
+ "fdes->ports",
+ "fdopen",
+ "fifth",
+ "file-encoding",
+ "file-exists?",
+ "file-is-directory?",
+ "file-name-separator?",
+ "file-port?",
+ "file-position",
+ "file-set-position",
+ "fileno",
+ "filter",
+ "filter!",
+ "filter-map",
+ "find",
+ "find-tail",
+ "finite?",
+ "first",
+ "flock",
+ "floor",
+ "floor-quotient",
+ "floor-remainder",
+ "floor/",
+ "fluid->parameter",
+ "fluid-bound?",
+ "fluid-ref",
+ "fluid-ref*",
+ "fluid-set!",
+ "fluid-thread-local?",
+ "fluid-unset!",
+ "fluid?",
+ "flush-all-ports",
+ "fold",
+ "fold-right",
+ "for-each",
+ "force",
+ "force-output",
+ "format",
+ "fourth",
+ "frame-address",
+ "frame-arguments",
+ "frame-dynamic-link",
+ "frame-instruction-pointer",
+ "frame-previous",
+ "frame-procedure-name",
+ "frame-return-address",
+ "frame-source",
+ "frame-stack-pointer",
+ "frame?",
+ "free-identifier=?",
+ "fsync",
+ "ftell",
+ "gai-strerror",
+ "gc",
+ "gc-disable",
+ "gc-dump",
+ "gc-enable",
+ "gc-run-time",
+ "gc-stats",
+ "gcd",
+ "generate-temporaries",
+ "gensym",
+ "get-internal-real-time",
+ "get-internal-run-time",
+ "get-output-string",
+ "get-print-state",
+ "getaddrinfo",
+ "getaffinity",
+ "getcwd",
+ "getegid",
+ "getenv",
+ "geteuid",
+ "getgid",
+ "getgr",
+ "getgrent",
+ "getgrgid",
+ "getgrnam",
+ "getgroups",
+ "gethost",
+ "gethostbyaddr",
+ "gethostbyname",
+ "gethostent",
+ "gethostname",
+ "getitimer",
+ "getlogin",
+ "getnet",
+ "getnetbyaddr",
+ "getnetbyname",
+ "getnetent",
+ "getpass",
+ "getpeername",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getproto",
+ "getprotobyname",
+ "getprotobynumber",
+ "getprotoent",
+ "getpw",
+ "getpwent",
+ "getpwnam",
+ "getpwuid",
+ "getrlimit",
+ "getserv",
+ "getservbyname",
+ "getservbyport",
+ "getservent",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "gettext",
+ "gettimeofday",
+ "getuid",
+ "gmtime",
+ "group:gid",
+ "group:mem",
+ "group:name",
+ "group:passwd",
+ "hash",
+ "hash-clear!",
+ "hash-count",
+ "hash-create-handle!",
+ "hash-fold",
+ "hash-for-each",
+ "hash-for-each-handle",
+ "hash-get-handle",
+ "hash-map->list",
+ "hash-ref",
+ "hash-remove!",
+ "hash-set!",
+ "hash-table?",
+ "hashq",
+ "hashq-create-handle!",
+ "hashq-get-handle",
+ "hashq-ref",
+ "hashq-remove!",
+ "hashq-set!",
+ "hashv",
+ "hashv-create-handle!",
+ "hashv-get-handle",
+ "hashv-ref",
+ "hashv-remove!",
+ "hashv-set!",
+ "hashx-create-handle!",
+ "hashx-get-handle",
+ "hashx-ref",
+ "hashx-remove!",
+ "hashx-set!",
+ "hook->list",
+ "hook-empty?",
+ "hook?",
+ "hostent:addr-list",
+ "hostent:addrtype",
+ "hostent:aliases",
+ "hostent:length",
+ "hostent:name",
+ "identifier?",
+ "identity",
+ "imag-part",
+ "in-vicinity",
+ "include-deprecated-features",
+ "inet-lnaof",
+ "inet-makeaddr",
+ "inet-netof",
+ "inet-ntop",
+ "inet-pton",
+ "inexact->exact",
+ "inexact?",
+ "inf",
+ "inf?",
+ "inherit-print-state",
+ "input-port?",
+ "install-r6rs!",
+ "install-r7rs!",
+ "integer->char",
+ "integer-expt",
+ "integer-length",
+ "integer?",
+ "interaction-environment",
+ "iota",
+ "isatty?",
+ "issue-deprecation-warning",
+ "keyword->symbol",
+ "keyword-like-symbol->keyword",
+ "keyword?",
+ "kill",
+ "kw-arg-ref",
+ "last",
+ "last-pair",
+ "lcm",
+ "length",
+ "length+",
+ "link",
+ "list",
+ "list->array",
+ "list->bitvector",
+ "list->char-set",
+ "list->char-set!",
+ "list->f32vector",
+ "list->f64vector",
+ "list->s16vector",
+ "list->s32vector",
+ "list->s64vector",
+ "list->s8vector",
+ "list->string",
+ "list->symbol",
+ "list->typed-array",
+ "list->u16vector",
+ "list->u32vector",
+ "list->u64vector",
+ "list->u8vector",
+ "list->vector",
+ "list-cdr-ref",
+ "list-cdr-set!",
+ "list-copy",
+ "list-head",
+ "list-index",
+ "list-ref",
+ "list-set!",
+ "list-tabulate",
+ "list-tail",
+ "list=",
+ "list?",
+ "listen",
+ "load-compiled",
+ "load-extension",
+ "load-from-path",
+ "load-in-vicinity",
+ "load-user-init",
+ "local-define",
+ "local-define-module",
+ "local-ref",
+ "local-ref-module",
+ "local-remove",
+ "local-set!",
+ "localtime",
+ "log",
+ "log10",
+ "logand",
+ "logbit?",
+ "logcount",
+ "logior",
+ "lognot",
+ "logtest",
+ "logxor",
+ "lookup-duplicates-handlers",
+ "lset-adjoin",
+ "lset-diff+intersection",
+ "lset-diff+intersection!",
+ "lset-difference",
+ "lset-difference!",
+ "lset-intersection",
+ "lset-intersection!",
+ "lset-union",
+ "lset-union!",
+ "lset-xor",
+ "lset-xor!",
+ "lset<=",
+ "lset=",
+ "lstat",
+ "macro-binding",
+ "macro-name",
+ "macro-transformer",
+ "macro-type",
+ "macro?",
+ "macroexpand",
+ "macroexpanded?",
+ "magnitude",
+ "major-version",
+ "make-array",
+ "make-autoload-interface",
+ "make-bitvector",
+ "make-doubly-weak-hash-table",
+ "make-exception",
+ "make-exception-from-throw",
+ "make-exception-type",
+ "make-f32vector",
+ "make-f64vector",
+ "make-fluid",
+ "make-fresh-user-module",
+ "make-generalized-vector",
+ "make-guardian",
+ "make-hash-table",
+ "make-hook",
+ "make-list",
+ "make-module",
+ "make-modules-in",
+ "make-mutable-parameter",
+ "make-object-property",
+ "make-parameter",
+ "make-polar",
+ "make-procedure-with-setter",
+ "make-promise",
+ "make-prompt-tag",
+ "make-record-type",
+ "make-rectangular",
+ "make-regexp",
+ "make-s16vector",
+ "make-s32vector",
+ "make-s64vector",
+ "make-s8vector",
+ "make-shared-array",
+ "make-socket-address",
+ "make-soft-port",
+ "make-srfi-4-vector",
+ "make-stack",
+ "make-string",
+ "make-struct-layout",
+ "make-struct/no-tail",
+ "make-struct/simple",
+ "make-symbol",
+ "make-syntax-transformer",
+ "make-thread-local-fluid",
+ "make-typed-array",
+ "make-u16vector",
+ "make-u32vector",
+ "make-u64vector",
+ "make-u8vector",
+ "make-unbound-fluid",
+ "make-undefined-variable",
+ "make-variable",
+ "make-variable-transformer",
+ "make-vector",
+ "make-vtable",
+ "make-weak-key-hash-table",
+ "make-weak-value-hash-table",
+ "map",
+ "map!",
+ "map-in-order",
+ "max",
+ "member",
+ "memoize-expression",
+ "memoized-typecode",
+ "memq",
+ "memv",
+ "merge",
+ "merge!",
+ "micro-version",
+ "min",
+ "minor-version",
+ "mkdir",
+ "mkdtemp",
+ "mknod",
+ "mkstemp",
+ "mkstemp!",
+ "mktime",
+ "module-add!",
+ "module-autoload!",
+ "module-binder",
+ "module-bound?",
+ "module-call-observers",
+ "module-clear!",
+ "module-constructor",
+ "module-declarative?",
+ "module-defer-observers",
+ "module-define!",
+ "module-define-submodule!",
+ "module-defined?",
+ "module-duplicates-handlers",
+ "module-ensure-local-variable!",
+ "module-export!",
+ "module-export-all!",
+ "module-filename",
+ "module-for-each",
+ "module-generate-unique-id!",
+ "module-gensym",
+ "module-import-interface",
+ "module-import-obarray",
+ "module-kind",
+ "module-local-variable",
+ "module-locally-bound?",
+ "module-make-local-var!",
+ "module-map",
+ "module-modified",
+ "module-name",
+ "module-next-unique-id",
+ "module-obarray",
+ "module-obarray-get-handle",
+ "module-obarray-ref",
+ "module-obarray-remove!",
+ "module-obarray-set!",
+ "module-observe",
+ "module-observe-weak",
+ "module-observers",
+ "module-public-interface",
+ "module-re-export!",
+ "module-ref",
+ "module-ref-submodule",
+ "module-remove!",
+ "module-replace!",
+ "module-replacements",
+ "module-reverse-lookup",
+ "module-search",
+ "module-set!",
+ "module-submodule-binder",
+ "module-submodules",
+ "module-symbol-binding",
+ "module-symbol-interned?",
+ "module-symbol-local-binding",
+ "module-symbol-locally-interned?",
+ "module-transformer",
+ "module-unobserve",
+ "module-use!",
+ "module-use-interfaces!",
+ "module-uses",
+ "module-variable",
+ "module-version",
+ "module-weak-observers",
+ "module?",
+ "modulo",
+ "modulo-expt",
+ "move->fdes",
+ "nan",
+ "nan?",
+ "negate",
+ "negative?",
+ "nested-define!",
+ "nested-define-module!",
+ "nested-ref",
+ "nested-ref-module",
+ "nested-remove!",
+ "nested-set!",
+ "netent:addrtype",
+ "netent:aliases",
+ "netent:name",
+ "netent:net",
+ "newline",
+ "ngettext",
+ "nice",
+ "nil?",
+ "ninth",
+ "noop",
+ "not",
+ "not-pair?",
+ "null-environment",
+ "null-list?",
+ "null?",
+ "number->string",
+ "number?",
+ "numerator",
+ "object->string",
+ "object-address",
+ "object-properties",
+ "object-property",
+ "odd?",
+ "open",
+ "open-fdes",
+ "open-file",
+ "open-input-file",
+ "open-input-string",
+ "open-io-file",
+ "open-output-file",
+ "open-output-string",
+ "opendir",
+ "or-map",
+ "output-port?",
+ "pair-fold",
+ "pair-fold-right",
+ "pair-for-each",
+ "pair?",
+ "parameter-converter",
+ "parameter-fluid",
+ "parameter?",
+ "parse-path",
+ "parse-path-with-ellipsis",
+ "partition",
+ "partition!",
+ "passwd:dir",
+ "passwd:gecos",
+ "passwd:gid",
+ "passwd:name",
+ "passwd:passwd",
+ "passwd:shell",
+ "passwd:uid",
+ "pause",
+ "peek",
+ "peek-char",
+ "pipe",
+ "pk",
+ "port->fdes",
+ "port-closed?",
+ "port-column",
+ "port-conversion-strategy",
+ "port-encoding",
+ "port-filename",
+ "port-for-each",
+ "port-line",
+ "port-mode",
+ "port-revealed",
+ "port-with-print-state",
+ "port?",
+ "positive?",
+ "primitive-_exit",
+ "primitive-eval",
+ "primitive-exit",
+ "primitive-fork",
+ "primitive-load",
+ "primitive-load-path",
+ "primitive-move->fdes",
+ "primitive-read",
+ "print-disable",
+ "print-enable",
+ "print-exception",
+ "print-options",
+ "print-options-interface",
+ "procedure",
+ "procedure-documentation",
+ "procedure-minimum-arity",
+ "procedure-name",
+ "procedure-properties",
+ "procedure-property",
+ "procedure-source",
+ "procedure-with-setter?",
+ "procedure?",
+ "process-use-modules",
+ "program-arguments",
+ "promise?",
+ "proper-list?",
+ "protoent:aliases",
+ "protoent:name",
+ "protoent:proto",
+ "provide",
+ "provided?",
+ "purify-module!",
+ "putenv",
+ "quit",
+ "quotient",
+ "raise",
+ "raise-exception",
+ "random",
+ "random-state->datum",
+ "random-state-from-platform",
+ "random:exp",
+ "random:hollow-sphere!",
+ "random:normal",
+ "random:normal-vector!",
+ "random:solid-sphere!",
+ "random:uniform",
+ "rational?",
+ "rationalize",
+ "read",
+ "read-char",
+ "read-disable",
+ "read-enable",
+ "read-hash-extend",
+ "read-hash-procedure",
+ "read-hash-procedures",
+ "read-options",
+ "read-options-interface",
+ "read-syntax",
+ "readdir",
+ "readlink",
+ "real-part",
+ "real?",
+ "record-accessor",
+ "record-constructor",
+ "record-modifier",
+ "record-predicate",
+ "record-type-constructor",
+ "record-type-descriptor",
+ "record-type-extensible?",
+ "record-type-fields",
+ "record-type-has-parent?",
+ "record-type-mutable-fields",
+ "record-type-name",
+ "record-type-opaque?",
+ "record-type-parent",
+ "record-type-parents",
+ "record-type-properties",
+ "record-type-uid",
+ "record-type?",
+ "record?",
+ "recv!",
+ "recvfrom!",
+ "redirect-port",
+ "reduce",
+ "reduce-right",
+ "regexp-exec",
+ "regexp?",
+ "release-port-handle",
+ "reload-module",
+ "remainder",
+ "remove",
+ "remove!",
+ "remove-hook!",
+ "rename-file",
+ "repl-reader",
+ "reset-hook!",
+ "resolve-interface",
+ "resolve-module",
+ "resolve-r6rs-interface",
+ "restore-signals",
+ "restricted-vector-sort!",
+ "reverse",
+ "reverse!",
+ "reverse-list->string",
+ "rewinddir",
+ "rmdir",
+ "round",
+ "round-ash",
+ "round-quotient",
+ "round-remainder",
+ "round/",
+ "run-hook",
+ "s16vector",
+ "s16vector->list",
+ "s16vector-length",
+ "s16vector-ref",
+ "s16vector-set!",
+ "s16vector?",
+ "s32vector",
+ "s32vector->list",
+ "s32vector-length",
+ "s32vector-ref",
+ "s32vector-set!",
+ "s32vector?",
+ "s64vector",
+ "s64vector->list",
+ "s64vector-length",
+ "s64vector-ref",
+ "s64vector-set!",
+ "s64vector?",
+ "s8vector",
+ "s8vector->list",
+ "s8vector-length",
+ "s8vector-ref",
+ "s8vector-set!",
+ "s8vector?",
+ "save-module-excursion",
+ "scheme-report-environment",
+ "scm-error",
+ "search-path",
+ "second",
+ "seed->random-state",
+ "seek",
+ "select",
+ "self-evaluating?",
+ "send",
+ "sendfile",
+ "sendto",
+ "servent:aliases",
+ "servent:name",
+ "servent:port",
+ "servent:proto",
+ "set-autoloaded!",
+ "set-car!",
+ "set-cdr!",
+ "set-current-dynamic-state",
+ "set-current-error-port",
+ "set-current-input-port",
+ "set-current-module",
+ "set-current-output-port",
+ "set-exception-printer!",
+ "set-module-binder!",
+ "set-module-declarative?!",
+ "set-module-duplicates-handlers!",
+ "set-module-filename!",
+ "set-module-kind!",
+ "set-module-name!",
+ "set-module-next-unique-id!",
+ "set-module-obarray!",
+ "set-module-observers!",
+ "set-module-public-interface!",
+ "set-module-submodule-binder!",
+ "set-module-submodules!",
+ "set-module-transformer!",
+ "set-module-uses!",
+ "set-module-version!",
+ "set-object-properties!",
+ "set-object-property!",
+ "set-port-column!",
+ "set-port-conversion-strategy!",
+ "set-port-encoding!",
+ "set-port-filename!",
+ "set-port-line!",
+ "set-port-revealed!",
+ "set-procedure-minimum-arity!",
+ "set-procedure-properties!",
+ "set-procedure-property!",
+ "set-program-arguments",
+ "set-source-properties!",
+ "set-source-property!",
+ "set-struct-vtable-name!",
+ "set-symbol-property!",
+ "set-tm:gmtoff",
+ "set-tm:hour",
+ "set-tm:isdst",
+ "set-tm:mday",
+ "set-tm:min",
+ "set-tm:mon",
+ "set-tm:sec",
+ "set-tm:wday",
+ "set-tm:yday",
+ "set-tm:year",
+ "set-tm:zone",
+ "setaffinity",
+ "setegid",
+ "setenv",
+ "seteuid",
+ "setgid",
+ "setgr",
+ "setgrent",
+ "setgroups",
+ "sethost",
+ "sethostent",
+ "sethostname",
+ "setitimer",
+ "setlocale",
+ "setnet",
+ "setnetent",
+ "setpgid",
+ "setpriority",
+ "setproto",
+ "setprotoent",
+ "setpw",
+ "setpwent",
+ "setrlimit",
+ "setserv",
+ "setservent",
+ "setsid",
+ "setsockopt",
+ "setter",
+ "setuid",
+ "setvbuf",
+ "seventh",
+ "shared-array-increments",
+ "shared-array-offset",
+ "shared-array-root",
+ "shutdown",
+ "sigaction",
+ "simple-exceptions",
+ "simple-format",
+ "sin",
+ "sinh",
+ "sixth",
+ "sleep",
+ "sloppy-assoc",
+ "sloppy-assq",
+ "sloppy-assv",
+ "sockaddr:addr",
+ "sockaddr:fam",
+ "sockaddr:flowinfo",
+ "sockaddr:path",
+ "sockaddr:port",
+ "sockaddr:scopeid",
+ "socket",
+ "socketpair",
+ "sort",
+ "sort!",
+ "sort-list",
+ "sort-list!",
+ "sorted?",
+ "source-properties",
+ "source-property",
+ "span",
+ "span!",
+ "split-at",
+ "split-at!",
+ "sqrt",
+ "stable-sort",
+ "stable-sort!",
+ "stack-id",
+ "stack-length",
+ "stack-ref",
+ "stack?",
+ "stat",
+ "stat:atime",
+ "stat:atimensec",
+ "stat:blksize",
+ "stat:blocks",
+ "stat:ctime",
+ "stat:ctimensec",
+ "stat:dev",
+ "stat:gid",
+ "stat:ino",
+ "stat:mode",
+ "stat:mtime",
+ "stat:mtimensec",
+ "stat:nlink",
+ "stat:perms",
+ "stat:rdev",
+ "stat:size",
+ "stat:type",
+ "stat:uid",
+ "status:exit-val",
+ "status:stop-sig",
+ "status:term-sig",
+ "strerror",
+ "strftime",
+ "string",
+ "string->char-set",
+ "string->char-set!",
+ "string->list",
+ "string->number",
+ "string->symbol",
+ "string-any",
+ "string-any-c-code",
+ "string-append",
+ "string-append/shared",
+ "string-bytes-per-char",
+ "string-capitalize",
+ "string-capitalize!",
+ "string-ci->symbol",
+ "string-ci<",
+ "string-ci<=",
+ "string-ci<=?",
+ "string-ci<>",
+ "string-ci<?",
+ "string-ci=",
+ "string-ci=?",
+ "string-ci>",
+ "string-ci>=",
+ "string-ci>=?",
+ "string-ci>?",
+ "string-compare",
+ "string-compare-ci",
+ "string-concatenate",
+ "string-concatenate-reverse",
+ "string-concatenate-reverse/shared",
+ "string-concatenate/shared",
+ "string-contains",
+ "string-contains-ci",
+ "string-copy",
+ "string-copy!",
+ "string-count",
+ "string-delete",
+ "string-downcase",
+ "string-downcase!",
+ "string-drop",
+ "string-drop-right",
+ "string-every",
+ "string-every-c-code",
+ "string-fill!",
+ "string-filter",
+ "string-fold",
+ "string-fold-right",
+ "string-for-each",
+ "string-for-each-index",
+ "string-hash",
+ "string-hash-ci",
+ "string-index",
+ "string-index-right",
+ "string-join",
+ "string-length",
+ "string-map",
+ "string-map!",
+ "string-normalize-nfc",
+ "string-normalize-nfd",
+ "string-normalize-nfkc",
+ "string-normalize-nfkd",
+ "string-null?",
+ "string-pad",
+ "string-pad-right",
+ "string-prefix-ci?",
+ "string-prefix-length",
+ "string-prefix-length-ci",
+ "string-prefix?",
+ "string-ref",
+ "string-replace",
+ "string-reverse",
+ "string-reverse!",
+ "string-rindex",
+ "string-set!",
+ "string-skip",
+ "string-skip-right",
+ "string-split",
+ "string-suffix-ci?",
+ "string-suffix-length",
+ "string-suffix-length-ci",
+ "string-suffix?",
+ "string-tabulate",
+ "string-take",
+ "string-take-right",
+ "string-titlecase",
+ "string-titlecase!",
+ "string-tokenize",
+ "string-trim",
+ "string-trim-both",
+ "string-trim-right",
+ "string-unfold",
+ "string-unfold-right",
+ "string-upcase",
+ "string-upcase!",
+ "string-utf8-length",
+ "string-xcopy!",
+ "string<",
+ "string<=",
+ "string<=?",
+ "string<>",
+ "string<?",
+ "string=",
+ "string=?",
+ "string>",
+ "string>=",
+ "string>=?",
+ "string>?",
+ "string?",
+ "strptime",
+ "struct-layout",
+ "struct-ref",
+ "struct-ref/unboxed",
+ "struct-set!",
+ "struct-set!/unboxed",
+ "struct-vtable",
+ "struct-vtable-name",
+ "struct-vtable?",
+ "struct?",
+ "substring",
+ "substring-fill!",
+ "substring-move!",
+ "substring/copy",
+ "substring/read-only",
+ "substring/shared",
+ "supports-source-properties?",
+ "symbol",
+ "symbol->keyword",
+ "symbol->string",
+ "symbol-append",
+ "symbol-fref",
+ "symbol-fset!",
+ "symbol-hash",
+ "symbol-interned?",
+ "symbol-pref",
+ "symbol-prefix-proc",
+ "symbol-property",
+ "symbol-property-remove!",
+ "symbol-pset!",
+ "symbol?",
+ "symlink",
+ "sync",
+ "syntax->datum",
+ "syntax-source",
+ "syntax-violation",
+ "system",
+ "system*",
+ "system-async-mark",
+ "system-error-errno",
+ "system-file-name-convention",
+ "take",
+ "take!",
+ "take-right",
+ "take-while",
+ "take-while!",
+ "tan",
+ "tanh",
+ "tcgetpgrp",
+ "tcsetpgrp",
+ "tenth",
+ "textdomain",
+ "third",
+ "throw",
+ "thunk?",
+ "times",
+ "tm:gmtoff",
+ "tm:hour",
+ "tm:isdst",
+ "tm:mday",
+ "tm:min",
+ "tm:mon",
+ "tm:sec",
+ "tm:wday",
+ "tm:yday",
+ "tm:year",
+ "tm:zone",
+ "tmpfile",
+ "tmpnam",
+ "tms:clock",
+ "tms:cstime",
+ "tms:cutime",
+ "tms:stime",
+ "tms:utime",
+ "transpose-array",
+ "truncate",
+ "truncate-file",
+ "truncate-quotient",
+ "truncate-remainder",
+ "truncate/",
+ "try-load-module",
+ "try-module-autoload",
+ "ttyname",
+ "typed-array?",
+ "tzset",
+ "u16vector",
+ "u16vector->list",
+ "u16vector-length",
+ "u16vector-ref",
+ "u16vector-set!",
+ "u16vector?",
+ "u32vector",
+ "u32vector->list",
+ "u32vector-length",
+ "u32vector-ref",
+ "u32vector-set!",
+ "u32vector?",
+ "u64vector",
+ "u64vector->list",
+ "u64vector-length",
+ "u64vector-ref",
+ "u64vector-set!",
+ "u64vector?",
+ "u8vector",
+ "u8vector->list",
+ "u8vector-length",
+ "u8vector-ref",
+ "u8vector-set!",
+ "u8vector?",
+ "ucs-range->char-set",
+ "ucs-range->char-set!",
+ "umask",
+ "uname",
+ "unfold",
+ "unfold-right",
+ "unmemoize-expression",
+ "unread-char",
+ "unread-string",
+ "unsetenv",
+ "unspecified?",
+ "unzip1",
+ "unzip2",
+ "unzip3",
+ "unzip4",
+ "unzip5",
+ "use-srfis",
+ "user-modules-declarative?",
+ "using-readline?",
+ "usleep",
+ "utime",
+ "utsname:machine",
+ "utsname:nodename",
+ "utsname:release",
+ "utsname:sysname",
+ "utsname:version",
+ "values",
+ "variable-bound?",
+ "variable-ref",
+ "variable-set!",
+ "variable-unset!",
+ "variable?",
+ "vector",
+ "vector->list",
+ "vector-copy",
+ "vector-fill!",
+ "vector-length",
+ "vector-move-left!",
+ "vector-move-right!",
+ "vector-ref",
+ "vector-set!",
+ "vector?",
+ "version",
+ "version-matches?",
+ "waitpid",
+ "warn",
+ "weak-key-hash-table?",
+ "weak-value-hash-table?",
+ "with-continuation-barrier",
+ "with-dynamic-state",
+ "with-error-to-file",
+ "with-error-to-port",
+ "with-error-to-string",
+ "with-exception-handler",
+ "with-fluid*",
+ "with-fluids*",
+ "with-input-from-file",
+ "with-input-from-port",
+ "with-input-from-string",
+ "with-output-to-file",
+ "with-output-to-port",
+ "with-output-to-string",
+ "with-throw-handler",
+ "write",
+ "write-char",
+ "xcons",
+ "xsubstring",
+ "zero?",
+ "zip",
+}
+
diff --git a/pygments/lexers/_scilab_builtins.py b/pygments/lexers/_scilab_builtins.py
new file mode 100644
index 0000000..c433bcf
--- /dev/null
+++ b/pygments/lexers/_scilab_builtins.py
@@ -0,0 +1,3093 @@
+"""
+ pygments.lexers._scilab_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builtin list for the ScilabLexer.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Autogenerated
+
+commands_kw = (
+ 'abort',
+ 'apropos',
+ 'break',
+ 'case',
+ 'catch',
+ 'continue',
+ 'do',
+ 'else',
+ 'elseif',
+ 'end',
+ 'endfunction',
+ 'for',
+ 'function',
+ 'help',
+ 'if',
+ 'pause',
+ 'quit',
+ 'select',
+ 'then',
+ 'try',
+ 'while',
+)
+
+functions_kw = (
+ '!!_invoke_',
+ '%H5Object_e',
+ '%H5Object_fieldnames',
+ '%H5Object_p',
+ '%XMLAttr_6',
+ '%XMLAttr_e',
+ '%XMLAttr_i_XMLElem',
+ '%XMLAttr_length',
+ '%XMLAttr_p',
+ '%XMLAttr_size',
+ '%XMLDoc_6',
+ '%XMLDoc_e',
+ '%XMLDoc_i_XMLList',
+ '%XMLDoc_p',
+ '%XMLElem_6',
+ '%XMLElem_e',
+ '%XMLElem_i_XMLDoc',
+ '%XMLElem_i_XMLElem',
+ '%XMLElem_i_XMLList',
+ '%XMLElem_p',
+ '%XMLList_6',
+ '%XMLList_e',
+ '%XMLList_i_XMLElem',
+ '%XMLList_i_XMLList',
+ '%XMLList_length',
+ '%XMLList_p',
+ '%XMLList_size',
+ '%XMLNs_6',
+ '%XMLNs_e',
+ '%XMLNs_i_XMLElem',
+ '%XMLNs_p',
+ '%XMLSet_6',
+ '%XMLSet_e',
+ '%XMLSet_length',
+ '%XMLSet_p',
+ '%XMLSet_size',
+ '%XMLValid_p',
+ '%_EClass_6',
+ '%_EClass_e',
+ '%_EClass_p',
+ '%_EObj_0',
+ '%_EObj_1__EObj',
+ '%_EObj_1_b',
+ '%_EObj_1_c',
+ '%_EObj_1_i',
+ '%_EObj_1_s',
+ '%_EObj_2__EObj',
+ '%_EObj_2_b',
+ '%_EObj_2_c',
+ '%_EObj_2_i',
+ '%_EObj_2_s',
+ '%_EObj_3__EObj',
+ '%_EObj_3_b',
+ '%_EObj_3_c',
+ '%_EObj_3_i',
+ '%_EObj_3_s',
+ '%_EObj_4__EObj',
+ '%_EObj_4_b',
+ '%_EObj_4_c',
+ '%_EObj_4_i',
+ '%_EObj_4_s',
+ '%_EObj_5',
+ '%_EObj_6',
+ '%_EObj_a__EObj',
+ '%_EObj_a_b',
+ '%_EObj_a_c',
+ '%_EObj_a_i',
+ '%_EObj_a_s',
+ '%_EObj_d__EObj',
+ '%_EObj_d_b',
+ '%_EObj_d_c',
+ '%_EObj_d_i',
+ '%_EObj_d_s',
+ '%_EObj_disp',
+ '%_EObj_e',
+ '%_EObj_g__EObj',
+ '%_EObj_g_b',
+ '%_EObj_g_c',
+ '%_EObj_g_i',
+ '%_EObj_g_s',
+ '%_EObj_h__EObj',
+ '%_EObj_h_b',
+ '%_EObj_h_c',
+ '%_EObj_h_i',
+ '%_EObj_h_s',
+ '%_EObj_i__EObj',
+ '%_EObj_j__EObj',
+ '%_EObj_j_b',
+ '%_EObj_j_c',
+ '%_EObj_j_i',
+ '%_EObj_j_s',
+ '%_EObj_k__EObj',
+ '%_EObj_k_b',
+ '%_EObj_k_c',
+ '%_EObj_k_i',
+ '%_EObj_k_s',
+ '%_EObj_l__EObj',
+ '%_EObj_l_b',
+ '%_EObj_l_c',
+ '%_EObj_l_i',
+ '%_EObj_l_s',
+ '%_EObj_m__EObj',
+ '%_EObj_m_b',
+ '%_EObj_m_c',
+ '%_EObj_m_i',
+ '%_EObj_m_s',
+ '%_EObj_n__EObj',
+ '%_EObj_n_b',
+ '%_EObj_n_c',
+ '%_EObj_n_i',
+ '%_EObj_n_s',
+ '%_EObj_o__EObj',
+ '%_EObj_o_b',
+ '%_EObj_o_c',
+ '%_EObj_o_i',
+ '%_EObj_o_s',
+ '%_EObj_p',
+ '%_EObj_p__EObj',
+ '%_EObj_p_b',
+ '%_EObj_p_c',
+ '%_EObj_p_i',
+ '%_EObj_p_s',
+ '%_EObj_q__EObj',
+ '%_EObj_q_b',
+ '%_EObj_q_c',
+ '%_EObj_q_i',
+ '%_EObj_q_s',
+ '%_EObj_r__EObj',
+ '%_EObj_r_b',
+ '%_EObj_r_c',
+ '%_EObj_r_i',
+ '%_EObj_r_s',
+ '%_EObj_s__EObj',
+ '%_EObj_s_b',
+ '%_EObj_s_c',
+ '%_EObj_s_i',
+ '%_EObj_s_s',
+ '%_EObj_t',
+ '%_EObj_x__EObj',
+ '%_EObj_x_b',
+ '%_EObj_x_c',
+ '%_EObj_x_i',
+ '%_EObj_x_s',
+ '%_EObj_y__EObj',
+ '%_EObj_y_b',
+ '%_EObj_y_c',
+ '%_EObj_y_i',
+ '%_EObj_y_s',
+ '%_EObj_z__EObj',
+ '%_EObj_z_b',
+ '%_EObj_z_c',
+ '%_EObj_z_i',
+ '%_EObj_z_s',
+ '%_eigs',
+ '%_load',
+ '%b_1__EObj',
+ '%b_2__EObj',
+ '%b_3__EObj',
+ '%b_4__EObj',
+ '%b_a__EObj',
+ '%b_d__EObj',
+ '%b_g__EObj',
+ '%b_h__EObj',
+ '%b_i_XMLList',
+ '%b_i__EObj',
+ '%b_j__EObj',
+ '%b_k__EObj',
+ '%b_l__EObj',
+ '%b_m__EObj',
+ '%b_n__EObj',
+ '%b_o__EObj',
+ '%b_p__EObj',
+ '%b_q__EObj',
+ '%b_r__EObj',
+ '%b_s__EObj',
+ '%b_x__EObj',
+ '%b_y__EObj',
+ '%b_z__EObj',
+ '%c_1__EObj',
+ '%c_2__EObj',
+ '%c_3__EObj',
+ '%c_4__EObj',
+ '%c_a__EObj',
+ '%c_d__EObj',
+ '%c_g__EObj',
+ '%c_h__EObj',
+ '%c_i_XMLAttr',
+ '%c_i_XMLDoc',
+ '%c_i_XMLElem',
+ '%c_i_XMLList',
+ '%c_i__EObj',
+ '%c_j__EObj',
+ '%c_k__EObj',
+ '%c_l__EObj',
+ '%c_m__EObj',
+ '%c_n__EObj',
+ '%c_o__EObj',
+ '%c_p__EObj',
+ '%c_q__EObj',
+ '%c_r__EObj',
+ '%c_s__EObj',
+ '%c_x__EObj',
+ '%c_y__EObj',
+ '%c_z__EObj',
+ '%ce_i_XMLList',
+ '%fptr_i_XMLList',
+ '%h_i_XMLList',
+ '%hm_i_XMLList',
+ '%i_1__EObj',
+ '%i_2__EObj',
+ '%i_3__EObj',
+ '%i_4__EObj',
+ '%i_a__EObj',
+ '%i_abs',
+ '%i_cumprod',
+ '%i_cumsum',
+ '%i_d__EObj',
+ '%i_diag',
+ '%i_g__EObj',
+ '%i_h__EObj',
+ '%i_i_XMLList',
+ '%i_i__EObj',
+ '%i_j__EObj',
+ '%i_k__EObj',
+ '%i_l__EObj',
+ '%i_m__EObj',
+ '%i_matrix',
+ '%i_max',
+ '%i_maxi',
+ '%i_min',
+ '%i_mini',
+ '%i_mput',
+ '%i_n__EObj',
+ '%i_o__EObj',
+ '%i_p',
+ '%i_p__EObj',
+ '%i_prod',
+ '%i_q__EObj',
+ '%i_r__EObj',
+ '%i_s__EObj',
+ '%i_sum',
+ '%i_tril',
+ '%i_triu',
+ '%i_x__EObj',
+ '%i_y__EObj',
+ '%i_z__EObj',
+ '%ip_i_XMLList',
+ '%l_i_XMLList',
+ '%l_i__EObj',
+ '%lss_i_XMLList',
+ '%mc_i_XMLList',
+ '%msp_full',
+ '%msp_i_XMLList',
+ '%msp_spget',
+ '%p_i_XMLList',
+ '%ptr_i_XMLList',
+ '%r_i_XMLList',
+ '%s_1__EObj',
+ '%s_2__EObj',
+ '%s_3__EObj',
+ '%s_4__EObj',
+ '%s_a__EObj',
+ '%s_d__EObj',
+ '%s_g__EObj',
+ '%s_h__EObj',
+ '%s_i_XMLList',
+ '%s_i__EObj',
+ '%s_j__EObj',
+ '%s_k__EObj',
+ '%s_l__EObj',
+ '%s_m__EObj',
+ '%s_n__EObj',
+ '%s_o__EObj',
+ '%s_p__EObj',
+ '%s_q__EObj',
+ '%s_r__EObj',
+ '%s_s__EObj',
+ '%s_x__EObj',
+ '%s_y__EObj',
+ '%s_z__EObj',
+ '%sp_i_XMLList',
+ '%spb_i_XMLList',
+ '%st_i_XMLList',
+ 'Calendar',
+ 'ClipBoard',
+ 'Matplot',
+ 'Matplot1',
+ 'PlaySound',
+ 'TCL_DeleteInterp',
+ 'TCL_DoOneEvent',
+ 'TCL_EvalFile',
+ 'TCL_EvalStr',
+ 'TCL_ExistArray',
+ 'TCL_ExistInterp',
+ 'TCL_ExistVar',
+ 'TCL_GetVar',
+ 'TCL_GetVersion',
+ 'TCL_SetVar',
+ 'TCL_UnsetVar',
+ 'TCL_UpVar',
+ '_',
+ '_code2str',
+ '_d',
+ '_str2code',
+ 'about',
+ 'abs',
+ 'acos',
+ 'addModulePreferences',
+ 'addcolor',
+ 'addf',
+ 'addhistory',
+ 'addinter',
+ 'addlocalizationdomain',
+ 'amell',
+ 'and',
+ 'argn',
+ 'arl2_ius',
+ 'ascii',
+ 'asin',
+ 'atan',
+ 'backslash',
+ 'balanc',
+ 'banner',
+ 'base2dec',
+ 'basename',
+ 'bdiag',
+ 'beep',
+ 'besselh',
+ 'besseli',
+ 'besselj',
+ 'besselk',
+ 'bessely',
+ 'beta',
+ 'bezout',
+ 'bfinit',
+ 'blkfc1i',
+ 'blkslvi',
+ 'bool2s',
+ 'browsehistory',
+ 'browsevar',
+ 'bsplin3val',
+ 'buildDoc',
+ 'buildouttb',
+ 'bvode',
+ 'c_link',
+ 'call',
+ 'callblk',
+ 'captions',
+ 'cd',
+ 'cdfbet',
+ 'cdfbin',
+ 'cdfchi',
+ 'cdfchn',
+ 'cdff',
+ 'cdffnc',
+ 'cdfgam',
+ 'cdfnbn',
+ 'cdfnor',
+ 'cdfpoi',
+ 'cdft',
+ 'ceil',
+ 'champ',
+ 'champ1',
+ 'chdir',
+ 'chol',
+ 'clc',
+ 'clean',
+ 'clear',
+ 'clearfun',
+ 'clearglobal',
+ 'closeEditor',
+ 'closeEditvar',
+ 'closeXcos',
+ 'code2str',
+ 'coeff',
+ 'color',
+ 'comp',
+ 'completion',
+ 'conj',
+ 'contour2di',
+ 'contr',
+ 'conv2',
+ 'convstr',
+ 'copy',
+ 'copyfile',
+ 'corr',
+ 'cos',
+ 'coserror',
+ 'createdir',
+ 'cshep2d',
+ 'csvDefault',
+ 'csvIsnum',
+ 'csvRead',
+ 'csvStringToDouble',
+ 'csvTextScan',
+ 'csvWrite',
+ 'ctree2',
+ 'ctree3',
+ 'ctree4',
+ 'cumprod',
+ 'cumsum',
+ 'curblock',
+ 'curblockc',
+ 'daskr',
+ 'dasrt',
+ 'dassl',
+ 'data2sig',
+ 'datatipCreate',
+ 'datatipManagerMode',
+ 'datatipMove',
+ 'datatipRemove',
+ 'datatipSetDisplay',
+ 'datatipSetInterp',
+ 'datatipSetOrientation',
+ 'datatipSetStyle',
+ 'datatipToggle',
+ 'dawson',
+ 'dct',
+ 'debug',
+ 'dec2base',
+ 'deff',
+ 'definedfields',
+ 'degree',
+ 'delbpt',
+ 'delete',
+ 'deletefile',
+ 'delip',
+ 'delmenu',
+ 'det',
+ 'dgettext',
+ 'dhinf',
+ 'diag',
+ 'diary',
+ 'diffobjs',
+ 'disp',
+ 'dispbpt',
+ 'displayhistory',
+ 'disposefftwlibrary',
+ 'dlgamma',
+ 'dnaupd',
+ 'dneupd',
+ 'double',
+ 'drawaxis',
+ 'drawlater',
+ 'drawnow',
+ 'driver',
+ 'dsaupd',
+ 'dsearch',
+ 'dseupd',
+ 'dst',
+ 'duplicate',
+ 'editvar',
+ 'emptystr',
+ 'end_scicosim',
+ 'ereduc',
+ 'erf',
+ 'erfc',
+ 'erfcx',
+ 'erfi',
+ 'errcatch',
+ 'errclear',
+ 'error',
+ 'eval_cshep2d',
+ 'exec',
+ 'execstr',
+ 'exists',
+ 'exit',
+ 'exp',
+ 'expm',
+ 'exportUI',
+ 'export_to_hdf5',
+ 'eye',
+ 'fadj2sp',
+ 'fec',
+ 'feval',
+ 'fft',
+ 'fftw',
+ 'fftw_flags',
+ 'fftw_forget_wisdom',
+ 'fftwlibraryisloaded',
+ 'figure',
+ 'file',
+ 'filebrowser',
+ 'fileext',
+ 'fileinfo',
+ 'fileparts',
+ 'filesep',
+ 'find',
+ 'findBD',
+ 'findfiles',
+ 'fire_closing_finished',
+ 'floor',
+ 'format',
+ 'fort',
+ 'fprintfMat',
+ 'freq',
+ 'frexp',
+ 'fromc',
+ 'fromjava',
+ 'fscanfMat',
+ 'fsolve',
+ 'fstair',
+ 'full',
+ 'fullpath',
+ 'funcprot',
+ 'funptr',
+ 'gamma',
+ 'gammaln',
+ 'geom3d',
+ 'get',
+ 'getURL',
+ 'get_absolute_file_path',
+ 'get_fftw_wisdom',
+ 'getblocklabel',
+ 'getcallbackobject',
+ 'getdate',
+ 'getdebuginfo',
+ 'getdefaultlanguage',
+ 'getdrives',
+ 'getdynlibext',
+ 'getenv',
+ 'getfield',
+ 'gethistory',
+ 'gethistoryfile',
+ 'getinstalledlookandfeels',
+ 'getio',
+ 'getlanguage',
+ 'getlongpathname',
+ 'getlookandfeel',
+ 'getmd5',
+ 'getmemory',
+ 'getmodules',
+ 'getos',
+ 'getpid',
+ 'getrelativefilename',
+ 'getscicosvars',
+ 'getscilabmode',
+ 'getshortpathname',
+ 'gettext',
+ 'getvariablesonstack',
+ 'getversion',
+ 'glist',
+ 'global',
+ 'glue',
+ 'grand',
+ 'graphicfunction',
+ 'grayplot',
+ 'grep',
+ 'gsort',
+ 'gstacksize',
+ 'h5attr',
+ 'h5close',
+ 'h5cp',
+ 'h5dataset',
+ 'h5dump',
+ 'h5exists',
+ 'h5flush',
+ 'h5get',
+ 'h5group',
+ 'h5isArray',
+ 'h5isAttr',
+ 'h5isCompound',
+ 'h5isFile',
+ 'h5isGroup',
+ 'h5isList',
+ 'h5isRef',
+ 'h5isSet',
+ 'h5isSpace',
+ 'h5isType',
+ 'h5isVlen',
+ 'h5label',
+ 'h5ln',
+ 'h5ls',
+ 'h5mount',
+ 'h5mv',
+ 'h5open',
+ 'h5read',
+ 'h5readattr',
+ 'h5rm',
+ 'h5umount',
+ 'h5write',
+ 'h5writeattr',
+ 'havewindow',
+ 'helpbrowser',
+ 'hess',
+ 'hinf',
+ 'historymanager',
+ 'historysize',
+ 'host',
+ 'htmlDump',
+ 'htmlRead',
+ 'htmlReadStr',
+ 'htmlWrite',
+ 'iconvert',
+ 'ieee',
+ 'ilib_verbose',
+ 'imag',
+ 'impl',
+ 'import_from_hdf5',
+ 'imult',
+ 'inpnvi',
+ 'int',
+ 'int16',
+ 'int2d',
+ 'int32',
+ 'int3d',
+ 'int8',
+ 'interp',
+ 'interp2d',
+ 'interp3d',
+ 'intg',
+ 'intppty',
+ 'inttype',
+ 'inv',
+ 'invoke_lu',
+ 'is_handle_valid',
+ 'is_hdf5_file',
+ 'isalphanum',
+ 'isascii',
+ 'isdef',
+ 'isdigit',
+ 'isdir',
+ 'isequal',
+ 'isequalbitwise',
+ 'iserror',
+ 'isfile',
+ 'isglobal',
+ 'isletter',
+ 'isnum',
+ 'isreal',
+ 'iswaitingforinput',
+ 'jallowClassReloading',
+ 'jarray',
+ 'jautoTranspose',
+ 'jautoUnwrap',
+ 'javaclasspath',
+ 'javalibrarypath',
+ 'jcast',
+ 'jcompile',
+ 'jconvMatrixMethod',
+ 'jcreatejar',
+ 'jdeff',
+ 'jdisableTrace',
+ 'jenableTrace',
+ 'jexists',
+ 'jgetclassname',
+ 'jgetfield',
+ 'jgetfields',
+ 'jgetinfo',
+ 'jgetmethods',
+ 'jimport',
+ 'jinvoke',
+ 'jinvoke_db',
+ 'jnewInstance',
+ 'jremove',
+ 'jsetfield',
+ 'junwrap',
+ 'junwraprem',
+ 'jwrap',
+ 'jwrapinfloat',
+ 'kron',
+ 'lasterror',
+ 'ldiv',
+ 'ldivf',
+ 'legendre',
+ 'length',
+ 'lib',
+ 'librarieslist',
+ 'libraryinfo',
+ 'light',
+ 'linear_interpn',
+ 'lines',
+ 'link',
+ 'linmeq',
+ 'list',
+ 'listvar_in_hdf5',
+ 'load',
+ 'loadGui',
+ 'loadScicos',
+ 'loadXcos',
+ 'loadfftwlibrary',
+ 'loadhistory',
+ 'log',
+ 'log1p',
+ 'lsq',
+ 'lsq_splin',
+ 'lsqrsolve',
+ 'lsslist',
+ 'lstcat',
+ 'lstsize',
+ 'ltitr',
+ 'lu',
+ 'ludel',
+ 'lufact',
+ 'luget',
+ 'lusolve',
+ 'macr2lst',
+ 'macr2tree',
+ 'matfile_close',
+ 'matfile_listvar',
+ 'matfile_open',
+ 'matfile_varreadnext',
+ 'matfile_varwrite',
+ 'matrix',
+ 'max',
+ 'maxfiles',
+ 'mclearerr',
+ 'mclose',
+ 'meof',
+ 'merror',
+ 'messagebox',
+ 'mfprintf',
+ 'mfscanf',
+ 'mget',
+ 'mgeti',
+ 'mgetl',
+ 'mgetstr',
+ 'min',
+ 'mlist',
+ 'mode',
+ 'model2blk',
+ 'mopen',
+ 'move',
+ 'movefile',
+ 'mprintf',
+ 'mput',
+ 'mputl',
+ 'mputstr',
+ 'mscanf',
+ 'mseek',
+ 'msprintf',
+ 'msscanf',
+ 'mtell',
+ 'mtlb_mode',
+ 'mtlb_sparse',
+ 'mucomp',
+ 'mulf',
+ 'name2rgb',
+ 'nearfloat',
+ 'newaxes',
+ 'newest',
+ 'newfun',
+ 'nnz',
+ 'norm',
+ 'notify',
+ 'number_properties',
+ 'ode',
+ 'odedc',
+ 'ones',
+ 'openged',
+ 'opentk',
+ 'optim',
+ 'or',
+ 'ordmmd',
+ 'parallel_concurrency',
+ 'parallel_run',
+ 'param3d',
+ 'param3d1',
+ 'part',
+ 'pathconvert',
+ 'pathsep',
+ 'phase_simulation',
+ 'plot2d',
+ 'plot2d1',
+ 'plot2d2',
+ 'plot2d3',
+ 'plot2d4',
+ 'plot3d',
+ 'plot3d1',
+ 'plotbrowser',
+ 'pointer_xproperty',
+ 'poly',
+ 'ppol',
+ 'pppdiv',
+ 'predef',
+ 'preferences',
+ 'print',
+ 'printf',
+ 'printfigure',
+ 'printsetupbox',
+ 'prod',
+ 'progressionbar',
+ 'prompt',
+ 'pwd',
+ 'qld',
+ 'qp_solve',
+ 'qr',
+ 'raise_window',
+ 'rand',
+ 'rankqr',
+ 'rat',
+ 'rcond',
+ 'rdivf',
+ 'read',
+ 'read4b',
+ 'read_csv',
+ 'readb',
+ 'readgateway',
+ 'readmps',
+ 'real',
+ 'realtime',
+ 'realtimeinit',
+ 'regexp',
+ 'relocate_handle',
+ 'remez',
+ 'removeModulePreferences',
+ 'removedir',
+ 'removelinehistory',
+ 'res_with_prec',
+ 'resethistory',
+ 'residu',
+ 'resume',
+ 'return',
+ 'ricc',
+ 'rlist',
+ 'roots',
+ 'rotate_axes',
+ 'round',
+ 'rpem',
+ 'rtitr',
+ 'rubberbox',
+ 'save',
+ 'saveGui',
+ 'saveafterncommands',
+ 'saveconsecutivecommands',
+ 'savehistory',
+ 'schur',
+ 'sci_haltscicos',
+ 'sci_tree2',
+ 'sci_tree3',
+ 'sci_tree4',
+ 'sciargs',
+ 'scicos_debug',
+ 'scicos_debug_count',
+ 'scicos_time',
+ 'scicosim',
+ 'scinotes',
+ 'sctree',
+ 'semidef',
+ 'set',
+ 'set_blockerror',
+ 'set_fftw_wisdom',
+ 'set_xproperty',
+ 'setbpt',
+ 'setdefaultlanguage',
+ 'setenv',
+ 'setfield',
+ 'sethistoryfile',
+ 'setlanguage',
+ 'setlookandfeel',
+ 'setmenu',
+ 'sfact',
+ 'sfinit',
+ 'show_window',
+ 'sident',
+ 'sig2data',
+ 'sign',
+ 'simp',
+ 'simp_mode',
+ 'sin',
+ 'size',
+ 'slash',
+ 'sleep',
+ 'sorder',
+ 'sparse',
+ 'spchol',
+ 'spcompack',
+ 'spec',
+ 'spget',
+ 'splin',
+ 'splin2d',
+ 'splin3d',
+ 'splitURL',
+ 'spones',
+ 'sprintf',
+ 'sqrt',
+ 'stacksize',
+ 'str2code',
+ 'strcat',
+ 'strchr',
+ 'strcmp',
+ 'strcspn',
+ 'strindex',
+ 'string',
+ 'stringbox',
+ 'stripblanks',
+ 'strncpy',
+ 'strrchr',
+ 'strrev',
+ 'strsplit',
+ 'strspn',
+ 'strstr',
+ 'strsubst',
+ 'strtod',
+ 'strtok',
+ 'subf',
+ 'sum',
+ 'svd',
+ 'swap_handles',
+ 'symfcti',
+ 'syredi',
+ 'system_getproperty',
+ 'system_setproperty',
+ 'ta2lpd',
+ 'tan',
+ 'taucs_chdel',
+ 'taucs_chfact',
+ 'taucs_chget',
+ 'taucs_chinfo',
+ 'taucs_chsolve',
+ 'tempname',
+ 'testmatrix',
+ 'timer',
+ 'tlist',
+ 'tohome',
+ 'tokens',
+ 'toolbar',
+ 'toprint',
+ 'tr_zer',
+ 'tril',
+ 'triu',
+ 'type',
+ 'typename',
+ 'uiDisplayTree',
+ 'uicontextmenu',
+ 'uicontrol',
+ 'uigetcolor',
+ 'uigetdir',
+ 'uigetfile',
+ 'uigetfont',
+ 'uimenu',
+ 'uint16',
+ 'uint32',
+ 'uint8',
+ 'uipopup',
+ 'uiputfile',
+ 'uiwait',
+ 'ulink',
+ 'umf_ludel',
+ 'umf_lufact',
+ 'umf_luget',
+ 'umf_luinfo',
+ 'umf_lusolve',
+ 'umfpack',
+ 'unglue',
+ 'unix',
+ 'unsetmenu',
+ 'unzoom',
+ 'updatebrowsevar',
+ 'usecanvas',
+ 'useeditor',
+ 'user',
+ 'var2vec',
+ 'varn',
+ 'vec2var',
+ 'waitbar',
+ 'warnBlockByUID',
+ 'warning',
+ 'what',
+ 'where',
+ 'whereis',
+ 'who',
+ 'winsid',
+ 'with_module',
+ 'writb',
+ 'write',
+ 'write4b',
+ 'write_csv',
+ 'x_choose',
+ 'x_choose_modeless',
+ 'x_dialog',
+ 'x_mdialog',
+ 'xarc',
+ 'xarcs',
+ 'xarrows',
+ 'xchange',
+ 'xchoicesi',
+ 'xclick',
+ 'xcos',
+ 'xcosAddToolsMenu',
+ 'xcosConfigureXmlFile',
+ 'xcosDiagramToScilab',
+ 'xcosPalCategoryAdd',
+ 'xcosPalDelete',
+ 'xcosPalDisable',
+ 'xcosPalEnable',
+ 'xcosPalGenerateIcon',
+ 'xcosPalGet',
+ 'xcosPalLoad',
+ 'xcosPalMove',
+ 'xcosSimulationStarted',
+ 'xcosUpdateBlock',
+ 'xdel',
+ 'xend',
+ 'xfarc',
+ 'xfarcs',
+ 'xfpoly',
+ 'xfpolys',
+ 'xfrect',
+ 'xget',
+ 'xgetmouse',
+ 'xgraduate',
+ 'xgrid',
+ 'xinit',
+ 'xlfont',
+ 'xls_open',
+ 'xls_read',
+ 'xmlAddNs',
+ 'xmlAppend',
+ 'xmlAsNumber',
+ 'xmlAsText',
+ 'xmlDTD',
+ 'xmlDelete',
+ 'xmlDocument',
+ 'xmlDump',
+ 'xmlElement',
+ 'xmlFormat',
+ 'xmlGetNsByHref',
+ 'xmlGetNsByPrefix',
+ 'xmlGetOpenDocs',
+ 'xmlIsValidObject',
+ 'xmlName',
+ 'xmlNs',
+ 'xmlRead',
+ 'xmlReadStr',
+ 'xmlRelaxNG',
+ 'xmlRemove',
+ 'xmlSchema',
+ 'xmlSetAttributes',
+ 'xmlValidate',
+ 'xmlWrite',
+ 'xmlXPath',
+ 'xname',
+ 'xpause',
+ 'xpoly',
+ 'xpolys',
+ 'xrect',
+ 'xrects',
+ 'xs2bmp',
+ 'xs2emf',
+ 'xs2eps',
+ 'xs2gif',
+ 'xs2jpg',
+ 'xs2pdf',
+ 'xs2png',
+ 'xs2ppm',
+ 'xs2ps',
+ 'xs2svg',
+ 'xsegs',
+ 'xset',
+ 'xstring',
+ 'xstringb',
+ 'xtitle',
+ 'zeros',
+ 'znaupd',
+ 'zneupd',
+ 'zoom_rect',
+)
+
+macros_kw = (
+ '!_deff_wrapper',
+ '%0_i_st',
+ '%3d_i_h',
+ '%Block_xcosUpdateBlock',
+ '%TNELDER_p',
+ '%TNELDER_string',
+ '%TNMPLOT_p',
+ '%TNMPLOT_string',
+ '%TOPTIM_p',
+ '%TOPTIM_string',
+ '%TSIMPLEX_p',
+ '%TSIMPLEX_string',
+ '%_EVoid_p',
+ '%_gsort',
+ '%_listvarinfile',
+ '%_rlist',
+ '%_save',
+ '%_sodload',
+ '%_strsplit',
+ '%_unwrap',
+ '%ar_p',
+ '%asn',
+ '%b_a_b',
+ '%b_a_s',
+ '%b_c_s',
+ '%b_c_spb',
+ '%b_cumprod',
+ '%b_cumsum',
+ '%b_d_s',
+ '%b_diag',
+ '%b_e',
+ '%b_f_s',
+ '%b_f_spb',
+ '%b_g_s',
+ '%b_g_spb',
+ '%b_grand',
+ '%b_h_s',
+ '%b_h_spb',
+ '%b_i_b',
+ '%b_i_ce',
+ '%b_i_h',
+ '%b_i_hm',
+ '%b_i_s',
+ '%b_i_sp',
+ '%b_i_spb',
+ '%b_i_st',
+ '%b_iconvert',
+ '%b_l_b',
+ '%b_l_s',
+ '%b_m_b',
+ '%b_m_s',
+ '%b_matrix',
+ '%b_n_hm',
+ '%b_o_hm',
+ '%b_p_s',
+ '%b_prod',
+ '%b_r_b',
+ '%b_r_s',
+ '%b_s_b',
+ '%b_s_s',
+ '%b_string',
+ '%b_sum',
+ '%b_tril',
+ '%b_triu',
+ '%b_x_b',
+ '%b_x_s',
+ '%bicg',
+ '%bicgstab',
+ '%c_a_c',
+ '%c_b_c',
+ '%c_b_s',
+ '%c_diag',
+ '%c_dsearch',
+ '%c_e',
+ '%c_eye',
+ '%c_f_s',
+ '%c_grand',
+ '%c_i_c',
+ '%c_i_ce',
+ '%c_i_h',
+ '%c_i_hm',
+ '%c_i_lss',
+ '%c_i_r',
+ '%c_i_s',
+ '%c_i_st',
+ '%c_matrix',
+ '%c_n_l',
+ '%c_n_st',
+ '%c_o_l',
+ '%c_o_st',
+ '%c_ones',
+ '%c_rand',
+ '%c_tril',
+ '%c_triu',
+ '%cblock_c_cblock',
+ '%cblock_c_s',
+ '%cblock_e',
+ '%cblock_f_cblock',
+ '%cblock_p',
+ '%cblock_size',
+ '%ce_6',
+ '%ce_c_ce',
+ '%ce_e',
+ '%ce_f_ce',
+ '%ce_i_ce',
+ '%ce_i_s',
+ '%ce_i_st',
+ '%ce_matrix',
+ '%ce_p',
+ '%ce_size',
+ '%ce_string',
+ '%ce_t',
+ '%cgs',
+ '%champdat_i_h',
+ '%choose',
+ '%diagram_xcos',
+ '%dir_p',
+ '%fptr_i_st',
+ '%grand_perm',
+ '%grayplot_i_h',
+ '%h_i_st',
+ '%hmS_k_hmS_generic',
+ '%hm_1_hm',
+ '%hm_1_s',
+ '%hm_2_hm',
+ '%hm_2_s',
+ '%hm_3_hm',
+ '%hm_3_s',
+ '%hm_4_hm',
+ '%hm_4_s',
+ '%hm_5',
+ '%hm_a_hm',
+ '%hm_a_r',
+ '%hm_a_s',
+ '%hm_abs',
+ '%hm_and',
+ '%hm_bool2s',
+ '%hm_c_hm',
+ '%hm_ceil',
+ '%hm_conj',
+ '%hm_cos',
+ '%hm_cumprod',
+ '%hm_cumsum',
+ '%hm_d_hm',
+ '%hm_d_s',
+ '%hm_degree',
+ '%hm_dsearch',
+ '%hm_e',
+ '%hm_exp',
+ '%hm_eye',
+ '%hm_f_hm',
+ '%hm_find',
+ '%hm_floor',
+ '%hm_g_hm',
+ '%hm_grand',
+ '%hm_gsort',
+ '%hm_h_hm',
+ '%hm_i_b',
+ '%hm_i_ce',
+ '%hm_i_h',
+ '%hm_i_hm',
+ '%hm_i_i',
+ '%hm_i_p',
+ '%hm_i_r',
+ '%hm_i_s',
+ '%hm_i_st',
+ '%hm_iconvert',
+ '%hm_imag',
+ '%hm_int',
+ '%hm_isnan',
+ '%hm_isreal',
+ '%hm_j_hm',
+ '%hm_j_s',
+ '%hm_k_hm',
+ '%hm_k_s',
+ '%hm_log',
+ '%hm_m_p',
+ '%hm_m_r',
+ '%hm_m_s',
+ '%hm_matrix',
+ '%hm_max',
+ '%hm_mean',
+ '%hm_median',
+ '%hm_min',
+ '%hm_n_b',
+ '%hm_n_c',
+ '%hm_n_hm',
+ '%hm_n_i',
+ '%hm_n_p',
+ '%hm_n_s',
+ '%hm_o_b',
+ '%hm_o_c',
+ '%hm_o_hm',
+ '%hm_o_i',
+ '%hm_o_p',
+ '%hm_o_s',
+ '%hm_ones',
+ '%hm_or',
+ '%hm_p',
+ '%hm_prod',
+ '%hm_q_hm',
+ '%hm_r_s',
+ '%hm_rand',
+ '%hm_real',
+ '%hm_round',
+ '%hm_s',
+ '%hm_s_hm',
+ '%hm_s_r',
+ '%hm_s_s',
+ '%hm_sign',
+ '%hm_sin',
+ '%hm_size',
+ '%hm_sqrt',
+ '%hm_stdev',
+ '%hm_string',
+ '%hm_sum',
+ '%hm_x_hm',
+ '%hm_x_p',
+ '%hm_x_s',
+ '%hm_zeros',
+ '%i_1_s',
+ '%i_2_s',
+ '%i_3_s',
+ '%i_4_s',
+ '%i_Matplot',
+ '%i_a_i',
+ '%i_a_s',
+ '%i_and',
+ '%i_ascii',
+ '%i_b_s',
+ '%i_bezout',
+ '%i_champ',
+ '%i_champ1',
+ '%i_contour',
+ '%i_contour2d',
+ '%i_d_i',
+ '%i_d_s',
+ '%i_dsearch',
+ '%i_e',
+ '%i_fft',
+ '%i_g_i',
+ '%i_gcd',
+ '%i_grand',
+ '%i_h_i',
+ '%i_i_ce',
+ '%i_i_h',
+ '%i_i_hm',
+ '%i_i_i',
+ '%i_i_s',
+ '%i_i_st',
+ '%i_j_i',
+ '%i_j_s',
+ '%i_l_s',
+ '%i_lcm',
+ '%i_length',
+ '%i_m_i',
+ '%i_m_s',
+ '%i_mfprintf',
+ '%i_mprintf',
+ '%i_msprintf',
+ '%i_n_s',
+ '%i_o_s',
+ '%i_or',
+ '%i_p_i',
+ '%i_p_s',
+ '%i_plot2d',
+ '%i_plot2d1',
+ '%i_plot2d2',
+ '%i_q_s',
+ '%i_r_i',
+ '%i_r_s',
+ '%i_round',
+ '%i_s_i',
+ '%i_s_s',
+ '%i_sign',
+ '%i_string',
+ '%i_x_i',
+ '%i_x_s',
+ '%ip_a_s',
+ '%ip_i_st',
+ '%ip_m_s',
+ '%ip_n_ip',
+ '%ip_o_ip',
+ '%ip_p',
+ '%ip_part',
+ '%ip_s_s',
+ '%ip_string',
+ '%k',
+ '%l_i_h',
+ '%l_i_s',
+ '%l_i_st',
+ '%l_isequal',
+ '%l_n_c',
+ '%l_n_l',
+ '%l_n_m',
+ '%l_n_p',
+ '%l_n_s',
+ '%l_n_st',
+ '%l_o_c',
+ '%l_o_l',
+ '%l_o_m',
+ '%l_o_p',
+ '%l_o_s',
+ '%l_o_st',
+ '%lss_a_lss',
+ '%lss_a_p',
+ '%lss_a_r',
+ '%lss_a_s',
+ '%lss_c_lss',
+ '%lss_c_p',
+ '%lss_c_r',
+ '%lss_c_s',
+ '%lss_e',
+ '%lss_eye',
+ '%lss_f_lss',
+ '%lss_f_p',
+ '%lss_f_r',
+ '%lss_f_s',
+ '%lss_i_ce',
+ '%lss_i_lss',
+ '%lss_i_p',
+ '%lss_i_r',
+ '%lss_i_s',
+ '%lss_i_st',
+ '%lss_inv',
+ '%lss_l_lss',
+ '%lss_l_p',
+ '%lss_l_r',
+ '%lss_l_s',
+ '%lss_m_lss',
+ '%lss_m_p',
+ '%lss_m_r',
+ '%lss_m_s',
+ '%lss_n_lss',
+ '%lss_n_p',
+ '%lss_n_r',
+ '%lss_n_s',
+ '%lss_norm',
+ '%lss_o_lss',
+ '%lss_o_p',
+ '%lss_o_r',
+ '%lss_o_s',
+ '%lss_ones',
+ '%lss_r_lss',
+ '%lss_r_p',
+ '%lss_r_r',
+ '%lss_r_s',
+ '%lss_rand',
+ '%lss_s',
+ '%lss_s_lss',
+ '%lss_s_p',
+ '%lss_s_r',
+ '%lss_s_s',
+ '%lss_size',
+ '%lss_t',
+ '%lss_v_lss',
+ '%lss_v_p',
+ '%lss_v_r',
+ '%lss_v_s',
+ '%lt_i_s',
+ '%m_n_l',
+ '%m_o_l',
+ '%mc_i_h',
+ '%mc_i_s',
+ '%mc_i_st',
+ '%mc_n_st',
+ '%mc_o_st',
+ '%mc_string',
+ '%mps_p',
+ '%mps_string',
+ '%msp_a_s',
+ '%msp_abs',
+ '%msp_e',
+ '%msp_find',
+ '%msp_i_s',
+ '%msp_i_st',
+ '%msp_length',
+ '%msp_m_s',
+ '%msp_maxi',
+ '%msp_n_msp',
+ '%msp_nnz',
+ '%msp_o_msp',
+ '%msp_p',
+ '%msp_sparse',
+ '%msp_spones',
+ '%msp_t',
+ '%p_a_lss',
+ '%p_a_r',
+ '%p_c_lss',
+ '%p_c_r',
+ '%p_cumprod',
+ '%p_cumsum',
+ '%p_d_p',
+ '%p_d_r',
+ '%p_d_s',
+ '%p_det',
+ '%p_e',
+ '%p_f_lss',
+ '%p_f_r',
+ '%p_grand',
+ '%p_i_ce',
+ '%p_i_h',
+ '%p_i_hm',
+ '%p_i_lss',
+ '%p_i_p',
+ '%p_i_r',
+ '%p_i_s',
+ '%p_i_st',
+ '%p_inv',
+ '%p_j_s',
+ '%p_k_p',
+ '%p_k_r',
+ '%p_k_s',
+ '%p_l_lss',
+ '%p_l_p',
+ '%p_l_r',
+ '%p_l_s',
+ '%p_m_hm',
+ '%p_m_lss',
+ '%p_m_r',
+ '%p_matrix',
+ '%p_n_l',
+ '%p_n_lss',
+ '%p_n_r',
+ '%p_o_l',
+ '%p_o_lss',
+ '%p_o_r',
+ '%p_o_sp',
+ '%p_p_s',
+ '%p_part',
+ '%p_prod',
+ '%p_q_p',
+ '%p_q_r',
+ '%p_q_s',
+ '%p_r_lss',
+ '%p_r_p',
+ '%p_r_r',
+ '%p_r_s',
+ '%p_s_lss',
+ '%p_s_r',
+ '%p_simp',
+ '%p_string',
+ '%p_sum',
+ '%p_v_lss',
+ '%p_v_p',
+ '%p_v_r',
+ '%p_v_s',
+ '%p_x_hm',
+ '%p_x_r',
+ '%p_y_p',
+ '%p_y_r',
+ '%p_y_s',
+ '%p_z_p',
+ '%p_z_r',
+ '%p_z_s',
+ '%pcg',
+ '%plist_p',
+ '%plist_string',
+ '%r_0',
+ '%r_a_hm',
+ '%r_a_lss',
+ '%r_a_p',
+ '%r_a_r',
+ '%r_a_s',
+ '%r_c_lss',
+ '%r_c_p',
+ '%r_c_r',
+ '%r_c_s',
+ '%r_clean',
+ '%r_cumprod',
+ '%r_cumsum',
+ '%r_d_p',
+ '%r_d_r',
+ '%r_d_s',
+ '%r_det',
+ '%r_diag',
+ '%r_e',
+ '%r_eye',
+ '%r_f_lss',
+ '%r_f_p',
+ '%r_f_r',
+ '%r_f_s',
+ '%r_i_ce',
+ '%r_i_hm',
+ '%r_i_lss',
+ '%r_i_p',
+ '%r_i_r',
+ '%r_i_s',
+ '%r_i_st',
+ '%r_inv',
+ '%r_j_s',
+ '%r_k_p',
+ '%r_k_r',
+ '%r_k_s',
+ '%r_l_lss',
+ '%r_l_p',
+ '%r_l_r',
+ '%r_l_s',
+ '%r_m_hm',
+ '%r_m_lss',
+ '%r_m_p',
+ '%r_m_r',
+ '%r_m_s',
+ '%r_matrix',
+ '%r_n_lss',
+ '%r_n_p',
+ '%r_n_r',
+ '%r_n_s',
+ '%r_norm',
+ '%r_o_lss',
+ '%r_o_p',
+ '%r_o_r',
+ '%r_o_s',
+ '%r_ones',
+ '%r_p',
+ '%r_p_s',
+ '%r_prod',
+ '%r_q_p',
+ '%r_q_r',
+ '%r_q_s',
+ '%r_r_lss',
+ '%r_r_p',
+ '%r_r_r',
+ '%r_r_s',
+ '%r_rand',
+ '%r_s',
+ '%r_s_hm',
+ '%r_s_lss',
+ '%r_s_p',
+ '%r_s_r',
+ '%r_s_s',
+ '%r_simp',
+ '%r_size',
+ '%r_string',
+ '%r_sum',
+ '%r_t',
+ '%r_tril',
+ '%r_triu',
+ '%r_v_lss',
+ '%r_v_p',
+ '%r_v_r',
+ '%r_v_s',
+ '%r_varn',
+ '%r_x_p',
+ '%r_x_r',
+ '%r_x_s',
+ '%r_y_p',
+ '%r_y_r',
+ '%r_y_s',
+ '%r_z_p',
+ '%r_z_r',
+ '%r_z_s',
+ '%s_1_hm',
+ '%s_1_i',
+ '%s_2_hm',
+ '%s_2_i',
+ '%s_3_hm',
+ '%s_3_i',
+ '%s_4_hm',
+ '%s_4_i',
+ '%s_5',
+ '%s_a_b',
+ '%s_a_hm',
+ '%s_a_i',
+ '%s_a_ip',
+ '%s_a_lss',
+ '%s_a_msp',
+ '%s_a_r',
+ '%s_a_sp',
+ '%s_and',
+ '%s_b_i',
+ '%s_b_s',
+ '%s_bezout',
+ '%s_c_b',
+ '%s_c_cblock',
+ '%s_c_lss',
+ '%s_c_r',
+ '%s_c_sp',
+ '%s_d_b',
+ '%s_d_i',
+ '%s_d_p',
+ '%s_d_r',
+ '%s_d_sp',
+ '%s_e',
+ '%s_f_b',
+ '%s_f_cblock',
+ '%s_f_lss',
+ '%s_f_r',
+ '%s_f_sp',
+ '%s_g_b',
+ '%s_g_s',
+ '%s_gcd',
+ '%s_grand',
+ '%s_h_b',
+ '%s_h_s',
+ '%s_i_b',
+ '%s_i_c',
+ '%s_i_ce',
+ '%s_i_h',
+ '%s_i_hm',
+ '%s_i_i',
+ '%s_i_lss',
+ '%s_i_p',
+ '%s_i_r',
+ '%s_i_s',
+ '%s_i_sp',
+ '%s_i_spb',
+ '%s_i_st',
+ '%s_j_i',
+ '%s_k_hm',
+ '%s_k_p',
+ '%s_k_r',
+ '%s_k_sp',
+ '%s_l_b',
+ '%s_l_hm',
+ '%s_l_i',
+ '%s_l_lss',
+ '%s_l_p',
+ '%s_l_r',
+ '%s_l_s',
+ '%s_l_sp',
+ '%s_lcm',
+ '%s_m_b',
+ '%s_m_hm',
+ '%s_m_i',
+ '%s_m_ip',
+ '%s_m_lss',
+ '%s_m_msp',
+ '%s_m_r',
+ '%s_matrix',
+ '%s_n_hm',
+ '%s_n_i',
+ '%s_n_l',
+ '%s_n_lss',
+ '%s_n_r',
+ '%s_n_st',
+ '%s_o_hm',
+ '%s_o_i',
+ '%s_o_l',
+ '%s_o_lss',
+ '%s_o_r',
+ '%s_o_st',
+ '%s_or',
+ '%s_p_b',
+ '%s_p_i',
+ '%s_pow',
+ '%s_q_hm',
+ '%s_q_i',
+ '%s_q_p',
+ '%s_q_r',
+ '%s_q_sp',
+ '%s_r_b',
+ '%s_r_i',
+ '%s_r_lss',
+ '%s_r_p',
+ '%s_r_r',
+ '%s_r_s',
+ '%s_r_sp',
+ '%s_s_b',
+ '%s_s_hm',
+ '%s_s_i',
+ '%s_s_ip',
+ '%s_s_lss',
+ '%s_s_r',
+ '%s_s_sp',
+ '%s_simp',
+ '%s_v_lss',
+ '%s_v_p',
+ '%s_v_r',
+ '%s_v_s',
+ '%s_x_b',
+ '%s_x_hm',
+ '%s_x_i',
+ '%s_x_r',
+ '%s_y_p',
+ '%s_y_r',
+ '%s_y_sp',
+ '%s_z_p',
+ '%s_z_r',
+ '%s_z_sp',
+ '%sn',
+ '%sp_a_s',
+ '%sp_a_sp',
+ '%sp_and',
+ '%sp_c_s',
+ '%sp_ceil',
+ '%sp_conj',
+ '%sp_cos',
+ '%sp_cumprod',
+ '%sp_cumsum',
+ '%sp_d_s',
+ '%sp_d_sp',
+ '%sp_det',
+ '%sp_diag',
+ '%sp_e',
+ '%sp_exp',
+ '%sp_f_s',
+ '%sp_floor',
+ '%sp_grand',
+ '%sp_gsort',
+ '%sp_i_ce',
+ '%sp_i_h',
+ '%sp_i_s',
+ '%sp_i_sp',
+ '%sp_i_st',
+ '%sp_int',
+ '%sp_inv',
+ '%sp_k_s',
+ '%sp_k_sp',
+ '%sp_l_s',
+ '%sp_l_sp',
+ '%sp_length',
+ '%sp_max',
+ '%sp_min',
+ '%sp_norm',
+ '%sp_or',
+ '%sp_p_s',
+ '%sp_prod',
+ '%sp_q_s',
+ '%sp_q_sp',
+ '%sp_r_s',
+ '%sp_r_sp',
+ '%sp_round',
+ '%sp_s_s',
+ '%sp_s_sp',
+ '%sp_sin',
+ '%sp_sqrt',
+ '%sp_string',
+ '%sp_sum',
+ '%sp_tril',
+ '%sp_triu',
+ '%sp_y_s',
+ '%sp_y_sp',
+ '%sp_z_s',
+ '%sp_z_sp',
+ '%spb_and',
+ '%spb_c_b',
+ '%spb_cumprod',
+ '%spb_cumsum',
+ '%spb_diag',
+ '%spb_e',
+ '%spb_f_b',
+ '%spb_g_b',
+ '%spb_g_spb',
+ '%spb_h_b',
+ '%spb_h_spb',
+ '%spb_i_b',
+ '%spb_i_ce',
+ '%spb_i_h',
+ '%spb_i_st',
+ '%spb_or',
+ '%spb_prod',
+ '%spb_sum',
+ '%spb_tril',
+ '%spb_triu',
+ '%st_6',
+ '%st_c_st',
+ '%st_e',
+ '%st_f_st',
+ '%st_i_b',
+ '%st_i_c',
+ '%st_i_fptr',
+ '%st_i_h',
+ '%st_i_i',
+ '%st_i_ip',
+ '%st_i_lss',
+ '%st_i_msp',
+ '%st_i_p',
+ '%st_i_r',
+ '%st_i_s',
+ '%st_i_sp',
+ '%st_i_spb',
+ '%st_i_st',
+ '%st_matrix',
+ '%st_n_c',
+ '%st_n_l',
+ '%st_n_mc',
+ '%st_n_p',
+ '%st_n_s',
+ '%st_o_c',
+ '%st_o_l',
+ '%st_o_mc',
+ '%st_o_p',
+ '%st_o_s',
+ '%st_o_tl',
+ '%st_p',
+ '%st_size',
+ '%st_string',
+ '%st_t',
+ '%ticks_i_h',
+ '%xls_e',
+ '%xls_p',
+ '%xlssheet_e',
+ '%xlssheet_p',
+ '%xlssheet_size',
+ '%xlssheet_string',
+ 'DominationRank',
+ 'G_make',
+ 'IsAScalar',
+ 'NDcost',
+ 'OS_Version',
+ 'PlotSparse',
+ 'ReadHBSparse',
+ 'TCL_CreateSlave',
+ 'abcd',
+ 'abinv',
+ 'accept_func_default',
+ 'accept_func_vfsa',
+ 'acf',
+ 'acosd',
+ 'acosh',
+ 'acoshm',
+ 'acosm',
+ 'acot',
+ 'acotd',
+ 'acoth',
+ 'acsc',
+ 'acscd',
+ 'acsch',
+ 'add_demo',
+ 'add_help_chapter',
+ 'add_module_help_chapter',
+ 'add_param',
+ 'add_profiling',
+ 'adj2sp',
+ 'aff2ab',
+ 'ana_style',
+ 'analpf',
+ 'analyze',
+ 'aplat',
+ 'arhnk',
+ 'arl2',
+ 'arma2p',
+ 'arma2ss',
+ 'armac',
+ 'armax',
+ 'armax1',
+ 'arobasestring2strings',
+ 'arsimul',
+ 'ascii2string',
+ 'asciimat',
+ 'asec',
+ 'asecd',
+ 'asech',
+ 'asind',
+ 'asinh',
+ 'asinhm',
+ 'asinm',
+ 'assert_checkalmostequal',
+ 'assert_checkequal',
+ 'assert_checkerror',
+ 'assert_checkfalse',
+ 'assert_checkfilesequal',
+ 'assert_checktrue',
+ 'assert_comparecomplex',
+ 'assert_computedigits',
+ 'assert_cond2reltol',
+ 'assert_cond2reqdigits',
+ 'assert_generror',
+ 'atand',
+ 'atanh',
+ 'atanhm',
+ 'atanm',
+ 'atomsAutoload',
+ 'atomsAutoloadAdd',
+ 'atomsAutoloadDel',
+ 'atomsAutoloadList',
+ 'atomsCategoryList',
+ 'atomsCheckModule',
+ 'atomsDepTreeShow',
+ 'atomsGetConfig',
+ 'atomsGetInstalled',
+ 'atomsGetInstalledPath',
+ 'atomsGetLoaded',
+ 'atomsGetLoadedPath',
+ 'atomsInstall',
+ 'atomsIsInstalled',
+ 'atomsIsLoaded',
+ 'atomsList',
+ 'atomsLoad',
+ 'atomsQuit',
+ 'atomsRemove',
+ 'atomsRepositoryAdd',
+ 'atomsRepositoryDel',
+ 'atomsRepositoryList',
+ 'atomsRestoreConfig',
+ 'atomsSaveConfig',
+ 'atomsSearch',
+ 'atomsSetConfig',
+ 'atomsShow',
+ 'atomsSystemInit',
+ 'atomsSystemUpdate',
+ 'atomsTest',
+ 'atomsUpdate',
+ 'atomsVersion',
+ 'augment',
+ 'auread',
+ 'auwrite',
+ 'balreal',
+ 'bench_run',
+ 'bilin',
+ 'bilt',
+ 'bin2dec',
+ 'binomial',
+ 'bitand',
+ 'bitcmp',
+ 'bitget',
+ 'bitor',
+ 'bitset',
+ 'bitxor',
+ 'black',
+ 'blanks',
+ 'bloc2exp',
+ 'bloc2ss',
+ 'block_parameter_error',
+ 'bode',
+ 'bode_asymp',
+ 'bstap',
+ 'buttmag',
+ 'bvodeS',
+ 'bytecode',
+ 'bytecodewalk',
+ 'cainv',
+ 'calendar',
+ 'calerf',
+ 'calfrq',
+ 'canon',
+ 'casc',
+ 'cat',
+ 'cat_code',
+ 'cb_m2sci_gui',
+ 'ccontrg',
+ 'cell',
+ 'cell2mat',
+ 'cellstr',
+ 'center',
+ 'cepstrum',
+ 'cfspec',
+ 'char',
+ 'chart',
+ 'cheb1mag',
+ 'cheb2mag',
+ 'check_gateways',
+ 'check_modules_xml',
+ 'check_versions',
+ 'chepol',
+ 'chfact',
+ 'chsolve',
+ 'classmarkov',
+ 'clean_help',
+ 'clock',
+ 'cls2dls',
+ 'cmb_lin',
+ 'cmndred',
+ 'cmoment',
+ 'coding_ga_binary',
+ 'coding_ga_identity',
+ 'coff',
+ 'coffg',
+ 'colcomp',
+ 'colcompr',
+ 'colinout',
+ 'colregul',
+ 'companion',
+ 'complex',
+ 'compute_initial_temp',
+ 'cond',
+ 'cond2sp',
+ 'condestsp',
+ 'configure_msifort',
+ 'configure_msvc',
+ 'conjgrad',
+ 'cont_frm',
+ 'cont_mat',
+ 'contrss',
+ 'conv',
+ 'convert_to_float',
+ 'convertindex',
+ 'convol',
+ 'convol2d',
+ 'copfac',
+ 'correl',
+ 'cosd',
+ 'cosh',
+ 'coshm',
+ 'cosm',
+ 'cotd',
+ 'cotg',
+ 'coth',
+ 'cothm',
+ 'cov',
+ 'covar',
+ 'createXConfiguration',
+ 'createfun',
+ 'createstruct',
+ 'cross',
+ 'crossover_ga_binary',
+ 'crossover_ga_default',
+ 'csc',
+ 'cscd',
+ 'csch',
+ 'csgn',
+ 'csim',
+ 'cspect',
+ 'ctr_gram',
+ 'czt',
+ 'dae',
+ 'daeoptions',
+ 'damp',
+ 'datafit',
+ 'date',
+ 'datenum',
+ 'datevec',
+ 'dbphi',
+ 'dcf',
+ 'ddp',
+ 'dec2bin',
+ 'dec2hex',
+ 'dec2oct',
+ 'del_help_chapter',
+ 'del_module_help_chapter',
+ 'demo_begin',
+ 'demo_choose',
+ 'demo_compiler',
+ 'demo_end',
+ 'demo_file_choice',
+ 'demo_folder_choice',
+ 'demo_function_choice',
+ 'demo_gui',
+ 'demo_run',
+ 'demo_viewCode',
+ 'denom',
+ 'derivat',
+ 'derivative',
+ 'des2ss',
+ 'des2tf',
+ 'detectmsifort64tools',
+ 'detectmsvc64tools',
+ 'determ',
+ 'detr',
+ 'detrend',
+ 'devtools_run_builder',
+ 'dhnorm',
+ 'diff',
+ 'diophant',
+ 'dir',
+ 'dirname',
+ 'dispfiles',
+ 'dllinfo',
+ 'dscr',
+ 'dsimul',
+ 'dt_ility',
+ 'dtsi',
+ 'edit',
+ 'edit_error',
+ 'editor',
+ 'eigenmarkov',
+ 'eigs',
+ 'ell1mag',
+ 'enlarge_shape',
+ 'entropy',
+ 'eomday',
+ 'epred',
+ 'eqfir',
+ 'eqiir',
+ 'equil',
+ 'equil1',
+ 'erfinv',
+ 'etime',
+ 'eval',
+ 'evans',
+ 'evstr',
+ 'example_run',
+ 'expression2code',
+ 'extract_help_examples',
+ 'factor',
+ 'factorial',
+ 'factors',
+ 'faurre',
+ 'ffilt',
+ 'fft2',
+ 'fftshift',
+ 'fieldnames',
+ 'filt_sinc',
+ 'filter',
+ 'findABCD',
+ 'findAC',
+ 'findBDK',
+ 'findR',
+ 'find_freq',
+ 'find_links',
+ 'find_scicos_version',
+ 'findm',
+ 'findmsifortcompiler',
+ 'findmsvccompiler',
+ 'findx0BD',
+ 'firstnonsingleton',
+ 'fix',
+ 'fixedpointgcd',
+ 'flipdim',
+ 'flts',
+ 'fminsearch',
+ 'formatBlackTip',
+ 'formatBodeMagTip',
+ 'formatBodePhaseTip',
+ 'formatGainplotTip',
+ 'formatHallModuleTip',
+ 'formatHallPhaseTip',
+ 'formatNicholsGainTip',
+ 'formatNicholsPhaseTip',
+ 'formatNyquistTip',
+ 'formatPhaseplotTip',
+ 'formatSgridDampingTip',
+ 'formatSgridFreqTip',
+ 'formatZgridDampingTip',
+ 'formatZgridFreqTip',
+ 'format_txt',
+ 'fourplan',
+ 'frep2tf',
+ 'freson',
+ 'frfit',
+ 'frmag',
+ 'fseek_origin',
+ 'fsfirlin',
+ 'fspec',
+ 'fspecg',
+ 'fstabst',
+ 'ftest',
+ 'ftuneq',
+ 'fullfile',
+ 'fullrf',
+ 'fullrfk',
+ 'fun2string',
+ 'g_margin',
+ 'gainplot',
+ 'gamitg',
+ 'gcare',
+ 'gcd',
+ 'gencompilationflags_unix',
+ 'generateBlockImage',
+ 'generateBlockImages',
+ 'generic_i_ce',
+ 'generic_i_h',
+ 'generic_i_hm',
+ 'generic_i_s',
+ 'generic_i_st',
+ 'genlib',
+ 'genmarkov',
+ 'geomean',
+ 'getDiagramVersion',
+ 'getModelicaPath',
+ 'getPreferencesValue',
+ 'get_file_path',
+ 'get_function_path',
+ 'get_param',
+ 'get_profile',
+ 'get_scicos_version',
+ 'getd',
+ 'getscilabkeywords',
+ 'getshell',
+ 'gettklib',
+ 'gfare',
+ 'gfrancis',
+ 'givens',
+ 'glever',
+ 'gmres',
+ 'group',
+ 'gschur',
+ 'gspec',
+ 'gtild',
+ 'h2norm',
+ 'h_cl',
+ 'h_inf',
+ 'h_inf_st',
+ 'h_norm',
+ 'hallchart',
+ 'halt',
+ 'hank',
+ 'hankelsv',
+ 'harmean',
+ 'haveacompiler',
+ 'head_comments',
+ 'help_from_sci',
+ 'help_skeleton',
+ 'hermit',
+ 'hex2dec',
+ 'hilb',
+ 'hilbert',
+ 'histc',
+ 'horner',
+ 'householder',
+ 'hrmt',
+ 'htrianr',
+ 'hypermat',
+ 'idct',
+ 'idst',
+ 'ifft',
+ 'ifftshift',
+ 'iir',
+ 'iirgroup',
+ 'iirlp',
+ 'iirmod',
+ 'ilib_build',
+ 'ilib_build_jar',
+ 'ilib_compile',
+ 'ilib_for_link',
+ 'ilib_gen_Make',
+ 'ilib_gen_Make_unix',
+ 'ilib_gen_cleaner',
+ 'ilib_gen_gateway',
+ 'ilib_gen_loader',
+ 'ilib_include_flag',
+ 'ilib_mex_build',
+ 'im_inv',
+ 'importScicosDiagram',
+ 'importScicosPal',
+ 'importXcosDiagram',
+ 'imrep2ss',
+ 'ind2sub',
+ 'inistate',
+ 'init_ga_default',
+ 'init_param',
+ 'initial_scicos_tables',
+ 'input',
+ 'instruction2code',
+ 'intc',
+ 'intdec',
+ 'integrate',
+ 'interp1',
+ 'interpln',
+ 'intersect',
+ 'intl',
+ 'intsplin',
+ 'inttrap',
+ 'inv_coeff',
+ 'invr',
+ 'invrs',
+ 'invsyslin',
+ 'iqr',
+ 'isLeapYear',
+ 'is_absolute_path',
+ 'is_param',
+ 'iscell',
+ 'iscellstr',
+ 'iscolumn',
+ 'isempty',
+ 'isfield',
+ 'isinf',
+ 'ismatrix',
+ 'isnan',
+ 'isrow',
+ 'isscalar',
+ 'issparse',
+ 'issquare',
+ 'isstruct',
+ 'isvector',
+ 'jmat',
+ 'justify',
+ 'kalm',
+ 'karmarkar',
+ 'kernel',
+ 'kpure',
+ 'krac2',
+ 'kroneck',
+ 'lattn',
+ 'lattp',
+ 'launchtest',
+ 'lcf',
+ 'lcm',
+ 'lcmdiag',
+ 'leastsq',
+ 'leqe',
+ 'leqr',
+ 'lev',
+ 'levin',
+ 'lex_sort',
+ 'lft',
+ 'lin',
+ 'lin2mu',
+ 'lincos',
+ 'lindquist',
+ 'linf',
+ 'linfn',
+ 'linsolve',
+ 'linspace',
+ 'list2vec',
+ 'list_param',
+ 'listfiles',
+ 'listfunctions',
+ 'listvarinfile',
+ 'lmisolver',
+ 'lmitool',
+ 'loadXcosLibs',
+ 'loadmatfile',
+ 'loadwave',
+ 'log10',
+ 'log2',
+ 'logm',
+ 'logspace',
+ 'lqe',
+ 'lqg',
+ 'lqg2stan',
+ 'lqg_ltr',
+ 'lqr',
+ 'ls',
+ 'lyap',
+ 'm2sci_gui',
+ 'm_circle',
+ 'macglov',
+ 'macrovar',
+ 'mad',
+ 'makecell',
+ 'manedit',
+ 'mapsound',
+ 'markp2ss',
+ 'matfile2sci',
+ 'mdelete',
+ 'mean',
+ 'meanf',
+ 'median',
+ 'members',
+ 'mese',
+ 'meshgrid',
+ 'mfft',
+ 'mfile2sci',
+ 'minreal',
+ 'minss',
+ 'mkdir',
+ 'modulo',
+ 'moment',
+ 'mrfit',
+ 'msd',
+ 'mstr2sci',
+ 'mtlb',
+ 'mtlb_0',
+ 'mtlb_a',
+ 'mtlb_all',
+ 'mtlb_any',
+ 'mtlb_axes',
+ 'mtlb_axis',
+ 'mtlb_beta',
+ 'mtlb_box',
+ 'mtlb_choices',
+ 'mtlb_close',
+ 'mtlb_colordef',
+ 'mtlb_cond',
+ 'mtlb_cov',
+ 'mtlb_cumprod',
+ 'mtlb_cumsum',
+ 'mtlb_dec2hex',
+ 'mtlb_delete',
+ 'mtlb_diag',
+ 'mtlb_diff',
+ 'mtlb_dir',
+ 'mtlb_double',
+ 'mtlb_e',
+ 'mtlb_echo',
+ 'mtlb_error',
+ 'mtlb_eval',
+ 'mtlb_exist',
+ 'mtlb_eye',
+ 'mtlb_false',
+ 'mtlb_fft',
+ 'mtlb_fftshift',
+ 'mtlb_filter',
+ 'mtlb_find',
+ 'mtlb_findstr',
+ 'mtlb_fliplr',
+ 'mtlb_fopen',
+ 'mtlb_format',
+ 'mtlb_fprintf',
+ 'mtlb_fread',
+ 'mtlb_fscanf',
+ 'mtlb_full',
+ 'mtlb_fwrite',
+ 'mtlb_get',
+ 'mtlb_grid',
+ 'mtlb_hold',
+ 'mtlb_i',
+ 'mtlb_ifft',
+ 'mtlb_image',
+ 'mtlb_imp',
+ 'mtlb_int16',
+ 'mtlb_int32',
+ 'mtlb_int8',
+ 'mtlb_is',
+ 'mtlb_isa',
+ 'mtlb_isfield',
+ 'mtlb_isletter',
+ 'mtlb_isspace',
+ 'mtlb_l',
+ 'mtlb_legendre',
+ 'mtlb_linspace',
+ 'mtlb_logic',
+ 'mtlb_logical',
+ 'mtlb_loglog',
+ 'mtlb_lower',
+ 'mtlb_max',
+ 'mtlb_mean',
+ 'mtlb_median',
+ 'mtlb_mesh',
+ 'mtlb_meshdom',
+ 'mtlb_min',
+ 'mtlb_more',
+ 'mtlb_num2str',
+ 'mtlb_ones',
+ 'mtlb_pcolor',
+ 'mtlb_plot',
+ 'mtlb_prod',
+ 'mtlb_qr',
+ 'mtlb_qz',
+ 'mtlb_rand',
+ 'mtlb_randn',
+ 'mtlb_rcond',
+ 'mtlb_realmax',
+ 'mtlb_realmin',
+ 'mtlb_s',
+ 'mtlb_semilogx',
+ 'mtlb_semilogy',
+ 'mtlb_setstr',
+ 'mtlb_size',
+ 'mtlb_sort',
+ 'mtlb_sortrows',
+ 'mtlb_sprintf',
+ 'mtlb_sscanf',
+ 'mtlb_std',
+ 'mtlb_strcmp',
+ 'mtlb_strcmpi',
+ 'mtlb_strfind',
+ 'mtlb_strrep',
+ 'mtlb_subplot',
+ 'mtlb_sum',
+ 'mtlb_t',
+ 'mtlb_toeplitz',
+ 'mtlb_tril',
+ 'mtlb_triu',
+ 'mtlb_true',
+ 'mtlb_type',
+ 'mtlb_uint16',
+ 'mtlb_uint32',
+ 'mtlb_uint8',
+ 'mtlb_upper',
+ 'mtlb_var',
+ 'mtlb_zeros',
+ 'mu2lin',
+ 'mutation_ga_binary',
+ 'mutation_ga_default',
+ 'mvcorrel',
+ 'mvvacov',
+ 'nancumsum',
+ 'nand2mean',
+ 'nanmax',
+ 'nanmean',
+ 'nanmeanf',
+ 'nanmedian',
+ 'nanmin',
+ 'nanreglin',
+ 'nanstdev',
+ 'nansum',
+ 'narsimul',
+ 'ndgrid',
+ 'ndims',
+ 'nehari',
+ 'neigh_func_csa',
+ 'neigh_func_default',
+ 'neigh_func_fsa',
+ 'neigh_func_vfsa',
+ 'neldermead_cget',
+ 'neldermead_configure',
+ 'neldermead_costf',
+ 'neldermead_defaultoutput',
+ 'neldermead_destroy',
+ 'neldermead_function',
+ 'neldermead_get',
+ 'neldermead_log',
+ 'neldermead_new',
+ 'neldermead_restart',
+ 'neldermead_search',
+ 'neldermead_updatesimp',
+ 'nextpow2',
+ 'nfreq',
+ 'nicholschart',
+ 'nlev',
+ 'nmplot_cget',
+ 'nmplot_configure',
+ 'nmplot_contour',
+ 'nmplot_destroy',
+ 'nmplot_function',
+ 'nmplot_get',
+ 'nmplot_historyplot',
+ 'nmplot_log',
+ 'nmplot_new',
+ 'nmplot_outputcmd',
+ 'nmplot_restart',
+ 'nmplot_search',
+ 'nmplot_simplexhistory',
+ 'noisegen',
+ 'nonreg_test_run',
+ 'now',
+ 'nthroot',
+ 'null',
+ 'num2cell',
+ 'numderivative',
+ 'numdiff',
+ 'numer',
+ 'nyquist',
+ 'nyquistfrequencybounds',
+ 'obs_gram',
+ 'obscont',
+ 'observer',
+ 'obsv_mat',
+ 'obsvss',
+ 'oct2dec',
+ 'odeoptions',
+ 'optim_ga',
+ 'optim_moga',
+ 'optim_nsga',
+ 'optim_nsga2',
+ 'optim_sa',
+ 'optimbase_cget',
+ 'optimbase_checkbounds',
+ 'optimbase_checkcostfun',
+ 'optimbase_checkx0',
+ 'optimbase_configure',
+ 'optimbase_destroy',
+ 'optimbase_function',
+ 'optimbase_get',
+ 'optimbase_hasbounds',
+ 'optimbase_hasconstraints',
+ 'optimbase_hasnlcons',
+ 'optimbase_histget',
+ 'optimbase_histset',
+ 'optimbase_incriter',
+ 'optimbase_isfeasible',
+ 'optimbase_isinbounds',
+ 'optimbase_isinnonlincons',
+ 'optimbase_log',
+ 'optimbase_logshutdown',
+ 'optimbase_logstartup',
+ 'optimbase_new',
+ 'optimbase_outputcmd',
+ 'optimbase_outstruct',
+ 'optimbase_proj2bnds',
+ 'optimbase_set',
+ 'optimbase_stoplog',
+ 'optimbase_terminate',
+ 'optimget',
+ 'optimplotfunccount',
+ 'optimplotfval',
+ 'optimplotx',
+ 'optimset',
+ 'optimsimplex_center',
+ 'optimsimplex_check',
+ 'optimsimplex_compsomefv',
+ 'optimsimplex_computefv',
+ 'optimsimplex_deltafv',
+ 'optimsimplex_deltafvmax',
+ 'optimsimplex_destroy',
+ 'optimsimplex_dirmat',
+ 'optimsimplex_fvmean',
+ 'optimsimplex_fvstdev',
+ 'optimsimplex_fvvariance',
+ 'optimsimplex_getall',
+ 'optimsimplex_getallfv',
+ 'optimsimplex_getallx',
+ 'optimsimplex_getfv',
+ 'optimsimplex_getn',
+ 'optimsimplex_getnbve',
+ 'optimsimplex_getve',
+ 'optimsimplex_getx',
+ 'optimsimplex_gradientfv',
+ 'optimsimplex_log',
+ 'optimsimplex_new',
+ 'optimsimplex_reflect',
+ 'optimsimplex_setall',
+ 'optimsimplex_setallfv',
+ 'optimsimplex_setallx',
+ 'optimsimplex_setfv',
+ 'optimsimplex_setn',
+ 'optimsimplex_setnbve',
+ 'optimsimplex_setve',
+ 'optimsimplex_setx',
+ 'optimsimplex_shrink',
+ 'optimsimplex_size',
+ 'optimsimplex_sort',
+ 'optimsimplex_xbar',
+ 'orth',
+ 'output_ga_default',
+ 'output_moga_default',
+ 'output_nsga2_default',
+ 'output_nsga_default',
+ 'p_margin',
+ 'pack',
+ 'pareto_filter',
+ 'parrot',
+ 'pbig',
+ 'pca',
+ 'pcg',
+ 'pdiv',
+ 'pen2ea',
+ 'pencan',
+ 'pencost',
+ 'penlaur',
+ 'perctl',
+ 'perl',
+ 'perms',
+ 'permute',
+ 'pertrans',
+ 'pfactors',
+ 'pfss',
+ 'phasemag',
+ 'phaseplot',
+ 'phc',
+ 'pinv',
+ 'playsnd',
+ 'plotprofile',
+ 'plzr',
+ 'pmodulo',
+ 'pol2des',
+ 'pol2str',
+ 'polar',
+ 'polfact',
+ 'prbs_a',
+ 'prettyprint',
+ 'primes',
+ 'princomp',
+ 'profile',
+ 'proj',
+ 'projsl',
+ 'projspec',
+ 'psmall',
+ 'pspect',
+ 'qmr',
+ 'qpsolve',
+ 'quart',
+ 'quaskro',
+ 'rafiter',
+ 'randpencil',
+ 'range',
+ 'rank',
+ 'readxls',
+ 'recompilefunction',
+ 'recons',
+ 'reglin',
+ 'regress',
+ 'remezb',
+ 'remove_param',
+ 'remove_profiling',
+ 'repfreq',
+ 'replace_Ix_by_Fx',
+ 'repmat',
+ 'reset_profiling',
+ 'resize_matrix',
+ 'returntoscilab',
+ 'rhs2code',
+ 'ric_desc',
+ 'riccati',
+ 'rmdir',
+ 'routh_t',
+ 'rowcomp',
+ 'rowcompr',
+ 'rowinout',
+ 'rowregul',
+ 'rowshuff',
+ 'rref',
+ 'sample',
+ 'samplef',
+ 'samwr',
+ 'savematfile',
+ 'savewave',
+ 'scanf',
+ 'sci2exp',
+ 'sciGUI_init',
+ 'sci_sparse',
+ 'scicos_getvalue',
+ 'scicos_simulate',
+ 'scicos_workspace_init',
+ 'scisptdemo',
+ 'scitest',
+ 'sdiff',
+ 'sec',
+ 'secd',
+ 'sech',
+ 'selection_ga_elitist',
+ 'selection_ga_random',
+ 'sensi',
+ 'setPreferencesValue',
+ 'set_param',
+ 'setdiff',
+ 'sgrid',
+ 'show_margins',
+ 'show_pca',
+ 'showprofile',
+ 'signm',
+ 'sinc',
+ 'sincd',
+ 'sind',
+ 'sinh',
+ 'sinhm',
+ 'sinm',
+ 'sm2des',
+ 'sm2ss',
+ 'smga',
+ 'smooth',
+ 'solve',
+ 'sound',
+ 'soundsec',
+ 'sp2adj',
+ 'spaninter',
+ 'spanplus',
+ 'spantwo',
+ 'specfact',
+ 'speye',
+ 'sprand',
+ 'spzeros',
+ 'sqroot',
+ 'sqrtm',
+ 'squarewave',
+ 'squeeze',
+ 'srfaur',
+ 'srkf',
+ 'ss2des',
+ 'ss2ss',
+ 'ss2tf',
+ 'sskf',
+ 'ssprint',
+ 'ssrand',
+ 'st_deviation',
+ 'st_i_generic',
+ 'st_ility',
+ 'stabil',
+ 'statgain',
+ 'stdev',
+ 'stdevf',
+ 'steadycos',
+ 'strange',
+ 'strcmpi',
+ 'struct',
+ 'sub2ind',
+ 'sva',
+ 'svplot',
+ 'sylm',
+ 'sylv',
+ 'sysconv',
+ 'sysdiag',
+ 'sysfact',
+ 'syslin',
+ 'syssize',
+ 'system',
+ 'systmat',
+ 'tabul',
+ 'tand',
+ 'tanh',
+ 'tanhm',
+ 'tanm',
+ 'tbx_build_blocks',
+ 'tbx_build_cleaner',
+ 'tbx_build_gateway',
+ 'tbx_build_gateway_clean',
+ 'tbx_build_gateway_loader',
+ 'tbx_build_help',
+ 'tbx_build_help_loader',
+ 'tbx_build_loader',
+ 'tbx_build_localization',
+ 'tbx_build_macros',
+ 'tbx_build_pal_loader',
+ 'tbx_build_src',
+ 'tbx_builder',
+ 'tbx_builder_gateway',
+ 'tbx_builder_gateway_lang',
+ 'tbx_builder_help',
+ 'tbx_builder_help_lang',
+ 'tbx_builder_macros',
+ 'tbx_builder_src',
+ 'tbx_builder_src_lang',
+ 'tbx_generate_pofile',
+ 'temp_law_csa',
+ 'temp_law_default',
+ 'temp_law_fsa',
+ 'temp_law_huang',
+ 'temp_law_vfsa',
+ 'test_clean',
+ 'test_on_columns',
+ 'test_run',
+ 'test_run_level',
+ 'testexamples',
+ 'tf2des',
+ 'tf2ss',
+ 'thrownan',
+ 'tic',
+ 'time_id',
+ 'toc',
+ 'toeplitz',
+ 'tokenpos',
+ 'toolboxes',
+ 'trace',
+ 'trans',
+ 'translatepaths',
+ 'tree2code',
+ 'trfmod',
+ 'trianfml',
+ 'trimmean',
+ 'trisolve',
+ 'trzeros',
+ 'typeof',
+ 'ui_observer',
+ 'union',
+ 'unique',
+ 'unit_test_run',
+ 'unix_g',
+ 'unix_s',
+ 'unix_w',
+ 'unix_x',
+ 'unobs',
+ 'unpack',
+ 'unwrap',
+ 'variance',
+ 'variancef',
+ 'vec2list',
+ 'vectorfind',
+ 'ver',
+ 'warnobsolete',
+ 'wavread',
+ 'wavwrite',
+ 'wcenter',
+ 'weekday',
+ 'wfir',
+ 'wfir_gui',
+ 'whereami',
+ 'who_user',
+ 'whos',
+ 'wiener',
+ 'wigner',
+ 'window',
+ 'winlist',
+ 'with_javasci',
+ 'with_macros_source',
+ 'with_modelica_compiler',
+ 'with_tk',
+ 'xcorr',
+ 'xcosBlockEval',
+ 'xcosBlockInterface',
+ 'xcosCodeGeneration',
+ 'xcosConfigureModelica',
+ 'xcosPal',
+ 'xcosPalAdd',
+ 'xcosPalAddBlock',
+ 'xcosPalExport',
+ 'xcosPalGenerateAllIcons',
+ 'xcosShowBlockWarning',
+ 'xcosValidateBlockSet',
+ 'xcosValidateCompareBlock',
+ 'xcos_compile',
+ 'xcos_debug_gui',
+ 'xcos_run',
+ 'xcos_simulate',
+ 'xcov',
+ 'xmltochm',
+ 'xmltoformat',
+ 'xmltohtml',
+ 'xmltojar',
+ 'xmltopdf',
+ 'xmltops',
+ 'xmltoweb',
+ 'yulewalk',
+ 'zeropen',
+ 'zgrid',
+ 'zpbutt',
+ 'zpch1',
+ 'zpch2',
+ 'zpell',
+)
+
+variables_kw = (
+ '$',
+ '%F',
+ '%T',
+ '%e',
+ '%eps',
+ '%f',
+ '%fftw',
+ '%gui',
+ '%i',
+ '%inf',
+ '%io',
+ '%modalWarning',
+ '%nan',
+ '%pi',
+ '%s',
+ '%t',
+ '%tk',
+ '%toolboxes',
+ '%toolboxes_dir',
+ '%z',
+ 'PWD',
+ 'SCI',
+ 'SCIHOME',
+ 'TMPDIR',
+ 'arnoldilib',
+ 'assertlib',
+ 'atomslib',
+ 'cacsdlib',
+ 'compatibility_functilib',
+ 'corelib',
+ 'data_structureslib',
+ 'demo_toolslib',
+ 'development_toolslib',
+ 'differential_equationlib',
+ 'dynamic_linklib',
+ 'elementary_functionslib',
+ 'enull',
+ 'evoid',
+ 'external_objectslib',
+ 'fd',
+ 'fileiolib',
+ 'functionslib',
+ 'genetic_algorithmslib',
+ 'helptoolslib',
+ 'home',
+ 'integerlib',
+ 'interpolationlib',
+ 'iolib',
+ 'jnull',
+ 'jvoid',
+ 'linear_algebralib',
+ 'm2scilib',
+ 'matiolib',
+ 'modules_managerlib',
+ 'neldermeadlib',
+ 'optimbaselib',
+ 'optimizationlib',
+ 'optimsimplexlib',
+ 'output_streamlib',
+ 'overloadinglib',
+ 'parameterslib',
+ 'polynomialslib',
+ 'preferenceslib',
+ 'randliblib',
+ 'scicos_autolib',
+ 'scicos_utilslib',
+ 'scinoteslib',
+ 'signal_processinglib',
+ 'simulated_annealinglib',
+ 'soundlib',
+ 'sparselib',
+ 'special_functionslib',
+ 'spreadsheetlib',
+ 'statisticslib',
+ 'stringlib',
+ 'tclscilib',
+ 'timelib',
+ 'umfpacklib',
+ 'xcoslib',
+)
+
+
+if __name__ == '__main__': # pragma: no cover
+ import subprocess
+ from pygments.util import format_lines, duplicates_removed
+
+ mapping = {'variables': 'builtin'}
+
+ def extract_completion(var_type):
+ s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = s.communicate('''\
+fd = mopen("/dev/stderr", "wt");
+mputl(strcat(completion("", "%s"), "||"), fd);
+mclose(fd)\n''' % var_type)
+ if '||' not in output[1]:
+ raise Exception(output[0])
+ # Invalid DISPLAY causes this to be output:
+ text = output[1].strip()
+ if text.startswith('Error: unable to open display \n'):
+ text = text[len('Error: unable to open display \n'):]
+ return text.split('||')
+
+ new_data = {}
+ seen = set() # only keep first type for a given word
+ for t in ('functions', 'commands', 'macros', 'variables'):
+ new_data[t] = duplicates_removed(extract_completion(t), seen)
+ seen.update(set(new_data[t]))
+
+
+ with open(__file__) as f:
+ content = f.read()
+
+ header = content[:content.find('# Autogenerated')]
+ footer = content[content.find("if __name__ == '__main__':"):]
+
+ with open(__file__, 'w') as f:
+ f.write(header)
+ f.write('# Autogenerated\n\n')
+ for k, v in sorted(new_data.items()):
+ f.write(format_lines(k + '_kw', v) + '\n\n')
+ f.write(footer)
diff --git a/pygments/lexers/_sourcemod_builtins.py b/pygments/lexers/_sourcemod_builtins.py
new file mode 100644
index 0000000..6f1392d
--- /dev/null
+++ b/pygments/lexers/_sourcemod_builtins.py
@@ -0,0 +1,1151 @@
+"""
+ pygments.lexers._sourcemod_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file contains the names of SourceMod functions.
+
+ Do not edit the FUNCTIONS list by hand.
+
+ Run with `python -I` to regenerate.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+FUNCTIONS = (
+ 'OnEntityCreated',
+ 'OnEntityDestroyed',
+ 'OnGetGameDescription',
+ 'OnLevelInit',
+ 'SDKHook',
+ 'SDKHookEx',
+ 'SDKUnhook',
+ 'SDKHooks_TakeDamage',
+ 'SDKHooks_DropWeapon',
+ 'TopMenuHandler',
+ 'CreateTopMenu',
+ 'LoadTopMenuConfig',
+ 'AddToTopMenu',
+ 'GetTopMenuInfoString',
+ 'GetTopMenuObjName',
+ 'RemoveFromTopMenu',
+ 'DisplayTopMenu',
+ 'DisplayTopMenuCategory',
+ 'FindTopMenuCategory',
+ 'SetTopMenuTitleCaching',
+ 'OnAdminMenuCreated',
+ 'OnAdminMenuReady',
+ 'GetAdminTopMenu',
+ 'AddTargetsToMenu',
+ 'AddTargetsToMenu2',
+ 'RedisplayAdminMenu',
+ 'TEHook',
+ 'AddTempEntHook',
+ 'RemoveTempEntHook',
+ 'TE_Start',
+ 'TE_IsValidProp',
+ 'TE_WriteNum',
+ 'TE_ReadNum',
+ 'TE_WriteFloat',
+ 'TE_ReadFloat',
+ 'TE_WriteVector',
+ 'TE_ReadVector',
+ 'TE_WriteAngles',
+ 'TE_WriteFloatArray',
+ 'TE_Send',
+ 'TE_WriteEncodedEnt',
+ 'TE_SendToAll',
+ 'TE_SendToClient',
+ 'CreateKeyValues',
+ 'KvSetString',
+ 'KvSetNum',
+ 'KvSetUInt64',
+ 'KvSetFloat',
+ 'KvSetColor',
+ 'KvSetVector',
+ 'KvGetString',
+ 'KvGetNum',
+ 'KvGetFloat',
+ 'KvGetColor',
+ 'KvGetUInt64',
+ 'KvGetVector',
+ 'KvJumpToKey',
+ 'KvJumpToKeySymbol',
+ 'KvGotoFirstSubKey',
+ 'KvGotoNextKey',
+ 'KvSavePosition',
+ 'KvDeleteKey',
+ 'KvDeleteThis',
+ 'KvGoBack',
+ 'KvRewind',
+ 'KvGetSectionName',
+ 'KvSetSectionName',
+ 'KvGetDataType',
+ 'KeyValuesToFile',
+ 'FileToKeyValues',
+ 'StringToKeyValues',
+ 'KvSetEscapeSequences',
+ 'KvNodesInStack',
+ 'KvCopySubkeys',
+ 'KvFindKeyById',
+ 'KvGetNameSymbol',
+ 'KvGetSectionSymbol',
+ 'TE_SetupSparks',
+ 'TE_SetupSmoke',
+ 'TE_SetupDust',
+ 'TE_SetupMuzzleFlash',
+ 'TE_SetupMetalSparks',
+ 'TE_SetupEnergySplash',
+ 'TE_SetupArmorRicochet',
+ 'TE_SetupGlowSprite',
+ 'TE_SetupExplosion',
+ 'TE_SetupBloodSprite',
+ 'TE_SetupBeamRingPoint',
+ 'TE_SetupBeamPoints',
+ 'TE_SetupBeamLaser',
+ 'TE_SetupBeamRing',
+ 'TE_SetupBeamFollow',
+ 'HookEvent',
+ 'HookEventEx',
+ 'UnhookEvent',
+ 'CreateEvent',
+ 'FireEvent',
+ 'CancelCreatedEvent',
+ 'GetEventBool',
+ 'SetEventBool',
+ 'GetEventInt',
+ 'SetEventInt',
+ 'GetEventFloat',
+ 'SetEventFloat',
+ 'GetEventString',
+ 'SetEventString',
+ 'GetEventName',
+ 'SetEventBroadcast',
+ 'GetUserMessageType',
+ 'GetUserMessageId',
+ 'GetUserMessageName',
+ 'StartMessage',
+ 'StartMessageEx',
+ 'EndMessage',
+ 'MsgHook',
+ 'MsgPostHook',
+ 'HookUserMessage',
+ 'UnhookUserMessage',
+ 'StartMessageAll',
+ 'StartMessageOne',
+ 'InactivateClient',
+ 'ReconnectClient',
+ 'GetMaxEntities',
+ 'GetEntityCount',
+ 'IsValidEntity',
+ 'IsValidEdict',
+ 'IsEntNetworkable',
+ 'CreateEdict',
+ 'RemoveEdict',
+ 'GetEdictFlags',
+ 'SetEdictFlags',
+ 'GetEdictClassname',
+ 'GetEntityNetClass',
+ 'ChangeEdictState',
+ 'GetEntData',
+ 'SetEntData',
+ 'GetEntDataFloat',
+ 'SetEntDataFloat',
+ 'GetEntDataEnt2',
+ 'SetEntDataEnt2',
+ 'GetEntDataVector',
+ 'SetEntDataVector',
+ 'GetEntDataString',
+ 'SetEntDataString',
+ 'FindSendPropOffs',
+ 'FindSendPropInfo',
+ 'FindDataMapOffs',
+ 'FindDataMapInfo',
+ 'GetEntSendPropOffs',
+ 'GetEntProp',
+ 'SetEntProp',
+ 'GetEntPropFloat',
+ 'SetEntPropFloat',
+ 'GetEntPropEnt',
+ 'SetEntPropEnt',
+ 'GetEntPropVector',
+ 'SetEntPropVector',
+ 'GetEntPropString',
+ 'SetEntPropString',
+ 'GetEntPropArraySize',
+ 'GetEntDataArray',
+ 'SetEntDataArray',
+ 'GetEntityAddress',
+ 'GetEntityClassname',
+ 'float',
+ 'FloatMul',
+ 'FloatDiv',
+ 'FloatAdd',
+ 'FloatSub',
+ 'FloatFraction',
+ 'RoundToZero',
+ 'RoundToCeil',
+ 'RoundToFloor',
+ 'RoundToNearest',
+ 'FloatCompare',
+ 'SquareRoot',
+ 'Pow',
+ 'Exponential',
+ 'Logarithm',
+ 'Sine',
+ 'Cosine',
+ 'Tangent',
+ 'FloatAbs',
+ 'ArcTangent',
+ 'ArcCosine',
+ 'ArcSine',
+ 'ArcTangent2',
+ 'RoundFloat',
+ 'operator%',
+ 'DegToRad',
+ 'RadToDeg',
+ 'GetURandomInt',
+ 'GetURandomFloat',
+ 'SetURandomSeed',
+ 'SetURandomSeedSimple',
+ 'RemovePlayerItem',
+ 'GivePlayerItem',
+ 'GetPlayerWeaponSlot',
+ 'IgniteEntity',
+ 'ExtinguishEntity',
+ 'TeleportEntity',
+ 'ForcePlayerSuicide',
+ 'SlapPlayer',
+ 'FindEntityByClassname',
+ 'GetClientEyeAngles',
+ 'CreateEntityByName',
+ 'DispatchSpawn',
+ 'DispatchKeyValue',
+ 'DispatchKeyValueFloat',
+ 'DispatchKeyValueVector',
+ 'GetClientAimTarget',
+ 'GetTeamCount',
+ 'GetTeamName',
+ 'GetTeamScore',
+ 'SetTeamScore',
+ 'GetTeamClientCount',
+ 'SetEntityModel',
+ 'GetPlayerDecalFile',
+ 'GetPlayerJingleFile',
+ 'GetServerNetStats',
+ 'EquipPlayerWeapon',
+ 'ActivateEntity',
+ 'SetClientInfo',
+ 'GivePlayerAmmo',
+ 'SetClientListeningFlags',
+ 'GetClientListeningFlags',
+ 'SetListenOverride',
+ 'GetListenOverride',
+ 'IsClientMuted',
+ 'TR_GetPointContents',
+ 'TR_GetPointContentsEnt',
+ 'TR_TraceRay',
+ 'TR_TraceHull',
+ 'TR_TraceRayFilter',
+ 'TR_TraceHullFilter',
+ 'TR_TraceRayEx',
+ 'TR_TraceHullEx',
+ 'TR_TraceRayFilterEx',
+ 'TR_TraceHullFilterEx',
+ 'TR_GetFraction',
+ 'TR_GetEndPosition',
+ 'TR_GetEntityIndex',
+ 'TR_DidHit',
+ 'TR_GetHitGroup',
+ 'TR_GetPlaneNormal',
+ 'TR_PointOutsideWorld',
+ 'SortIntegers',
+ 'SortFloats',
+ 'SortStrings',
+ 'SortFunc1D',
+ 'SortCustom1D',
+ 'SortCustom2D',
+ 'SortADTArray',
+ 'SortFuncADTArray',
+ 'SortADTArrayCustom',
+ 'CompileRegex',
+ 'MatchRegex',
+ 'GetRegexSubString',
+ 'SimpleRegexMatch',
+ 'TF2_GetPlayerClass',
+ 'TF2_SetPlayerClass',
+ 'TF2_RemoveWeaponSlot',
+ 'TF2_RemoveAllWeapons',
+ 'TF2_IsPlayerInCondition',
+ 'TF2_GetObjectType',
+ 'TF2_GetObjectMode',
+ 'NominateMap',
+ 'RemoveNominationByMap',
+ 'RemoveNominationByOwner',
+ 'GetExcludeMapList',
+ 'GetNominatedMapList',
+ 'CanMapChooserStartVote',
+ 'InitiateMapChooserVote',
+ 'HasEndOfMapVoteFinished',
+ 'EndOfMapVoteEnabled',
+ 'OnNominationRemoved',
+ 'OnMapVoteStarted',
+ 'CreateTimer',
+ 'KillTimer',
+ 'TriggerTimer',
+ 'GetTickedTime',
+ 'GetMapTimeLeft',
+ 'GetMapTimeLimit',
+ 'ExtendMapTimeLimit',
+ 'GetTickInterval',
+ 'OnMapTimeLeftChanged',
+ 'IsServerProcessing',
+ 'CreateDataTimer',
+ 'ByteCountToCells',
+ 'CreateArray',
+ 'ClearArray',
+ 'CloneArray',
+ 'ResizeArray',
+ 'GetArraySize',
+ 'PushArrayCell',
+ 'PushArrayString',
+ 'PushArrayArray',
+ 'GetArrayCell',
+ 'GetArrayString',
+ 'GetArrayArray',
+ 'SetArrayCell',
+ 'SetArrayString',
+ 'SetArrayArray',
+ 'ShiftArrayUp',
+ 'RemoveFromArray',
+ 'SwapArrayItems',
+ 'FindStringInArray',
+ 'FindValueInArray',
+ 'ProcessTargetString',
+ 'ReplyToTargetError',
+ 'MultiTargetFilter',
+ 'AddMultiTargetFilter',
+ 'RemoveMultiTargetFilter',
+ 'OnBanClient',
+ 'OnBanIdentity',
+ 'OnRemoveBan',
+ 'BanClient',
+ 'BanIdentity',
+ 'RemoveBan',
+ 'CreateTrie',
+ 'SetTrieValue',
+ 'SetTrieArray',
+ 'SetTrieString',
+ 'GetTrieValue',
+ 'GetTrieArray',
+ 'GetTrieString',
+ 'RemoveFromTrie',
+ 'ClearTrie',
+ 'GetTrieSize',
+ 'GetFunctionByName',
+ 'CreateGlobalForward',
+ 'CreateForward',
+ 'GetForwardFunctionCount',
+ 'AddToForward',
+ 'RemoveFromForward',
+ 'RemoveAllFromForward',
+ 'Call_StartForward',
+ 'Call_StartFunction',
+ 'Call_PushCell',
+ 'Call_PushCellRef',
+ 'Call_PushFloat',
+ 'Call_PushFloatRef',
+ 'Call_PushArray',
+ 'Call_PushArrayEx',
+ 'Call_PushString',
+ 'Call_PushStringEx',
+ 'Call_Finish',
+ 'Call_Cancel',
+ 'NativeCall',
+ 'CreateNative',
+ 'ThrowNativeError',
+ 'GetNativeStringLength',
+ 'GetNativeString',
+ 'SetNativeString',
+ 'GetNativeCell',
+ 'GetNativeCellRef',
+ 'SetNativeCellRef',
+ 'GetNativeArray',
+ 'SetNativeArray',
+ 'FormatNativeString',
+ 'RequestFrameCallback',
+ 'RequestFrame',
+ 'OnRebuildAdminCache',
+ 'DumpAdminCache',
+ 'AddCommandOverride',
+ 'GetCommandOverride',
+ 'UnsetCommandOverride',
+ 'CreateAdmGroup',
+ 'FindAdmGroup',
+ 'SetAdmGroupAddFlag',
+ 'GetAdmGroupAddFlag',
+ 'GetAdmGroupAddFlags',
+ 'SetAdmGroupImmuneFrom',
+ 'GetAdmGroupImmuneCount',
+ 'GetAdmGroupImmuneFrom',
+ 'AddAdmGroupCmdOverride',
+ 'GetAdmGroupCmdOverride',
+ 'RegisterAuthIdentType',
+ 'CreateAdmin',
+ 'GetAdminUsername',
+ 'BindAdminIdentity',
+ 'SetAdminFlag',
+ 'GetAdminFlag',
+ 'GetAdminFlags',
+ 'AdminInheritGroup',
+ 'GetAdminGroupCount',
+ 'GetAdminGroup',
+ 'SetAdminPassword',
+ 'GetAdminPassword',
+ 'FindAdminByIdentity',
+ 'RemoveAdmin',
+ 'FlagBitsToBitArray',
+ 'FlagBitArrayToBits',
+ 'FlagArrayToBits',
+ 'FlagBitsToArray',
+ 'FindFlagByName',
+ 'FindFlagByChar',
+ 'FindFlagChar',
+ 'ReadFlagString',
+ 'CanAdminTarget',
+ 'CreateAuthMethod',
+ 'SetAdmGroupImmunityLevel',
+ 'GetAdmGroupImmunityLevel',
+ 'SetAdminImmunityLevel',
+ 'GetAdminImmunityLevel',
+ 'FlagToBit',
+ 'BitToFlag',
+ 'ServerCommand',
+ 'ServerCommandEx',
+ 'InsertServerCommand',
+ 'ServerExecute',
+ 'ClientCommand',
+ 'FakeClientCommand',
+ 'FakeClientCommandEx',
+ 'PrintToServer',
+ 'PrintToConsole',
+ 'ReplyToCommand',
+ 'GetCmdReplySource',
+ 'SetCmdReplySource',
+ 'IsChatTrigger',
+ 'ShowActivity2',
+ 'ShowActivity',
+ 'ShowActivityEx',
+ 'FormatActivitySource',
+ 'SrvCmd',
+ 'RegServerCmd',
+ 'ConCmd',
+ 'RegConsoleCmd',
+ 'RegAdminCmd',
+ 'GetCmdArgs',
+ 'GetCmdArg',
+ 'GetCmdArgString',
+ 'CreateConVar',
+ 'FindConVar',
+ 'ConVarChanged',
+ 'HookConVarChange',
+ 'UnhookConVarChange',
+ 'GetConVarBool',
+ 'SetConVarBool',
+ 'GetConVarInt',
+ 'SetConVarInt',
+ 'GetConVarFloat',
+ 'SetConVarFloat',
+ 'GetConVarString',
+ 'SetConVarString',
+ 'ResetConVar',
+ 'GetConVarDefault',
+ 'GetConVarFlags',
+ 'SetConVarFlags',
+ 'GetConVarBounds',
+ 'SetConVarBounds',
+ 'GetConVarName',
+ 'QueryClientConVar',
+ 'GetCommandIterator',
+ 'ReadCommandIterator',
+ 'CheckCommandAccess',
+ 'CheckAccess',
+ 'IsValidConVarChar',
+ 'GetCommandFlags',
+ 'SetCommandFlags',
+ 'FindFirstConCommand',
+ 'FindNextConCommand',
+ 'SendConVarValue',
+ 'AddServerTag',
+ 'RemoveServerTag',
+ 'CommandListener',
+ 'AddCommandListener',
+ 'RemoveCommandListener',
+ 'CommandExists',
+ 'OnClientSayCommand',
+ 'OnClientSayCommand_Post',
+ 'TF2_IgnitePlayer',
+ 'TF2_RespawnPlayer',
+ 'TF2_RegeneratePlayer',
+ 'TF2_AddCondition',
+ 'TF2_RemoveCondition',
+ 'TF2_SetPlayerPowerPlay',
+ 'TF2_DisguisePlayer',
+ 'TF2_RemovePlayerDisguise',
+ 'TF2_StunPlayer',
+ 'TF2_MakeBleed',
+ 'TF2_GetClass',
+ 'TF2_CalcIsAttackCritical',
+ 'TF2_OnIsHolidayActive',
+ 'TF2_IsHolidayActive',
+ 'TF2_IsPlayerInDuel',
+ 'TF2_RemoveWearable',
+ 'TF2_OnConditionAdded',
+ 'TF2_OnConditionRemoved',
+ 'TF2_OnWaitingForPlayersStart',
+ 'TF2_OnWaitingForPlayersEnd',
+ 'TF2_OnPlayerTeleport',
+ 'SQL_Connect',
+ 'SQL_DefConnect',
+ 'SQL_ConnectCustom',
+ 'SQLite_UseDatabase',
+ 'SQL_CheckConfig',
+ 'SQL_GetDriver',
+ 'SQL_ReadDriver',
+ 'SQL_GetDriverIdent',
+ 'SQL_GetDriverProduct',
+ 'SQL_SetCharset',
+ 'SQL_GetAffectedRows',
+ 'SQL_GetInsertId',
+ 'SQL_GetError',
+ 'SQL_EscapeString',
+ 'SQL_QuoteString',
+ 'SQL_FastQuery',
+ 'SQL_Query',
+ 'SQL_PrepareQuery',
+ 'SQL_FetchMoreResults',
+ 'SQL_HasResultSet',
+ 'SQL_GetRowCount',
+ 'SQL_GetFieldCount',
+ 'SQL_FieldNumToName',
+ 'SQL_FieldNameToNum',
+ 'SQL_FetchRow',
+ 'SQL_MoreRows',
+ 'SQL_Rewind',
+ 'SQL_FetchString',
+ 'SQL_FetchFloat',
+ 'SQL_FetchInt',
+ 'SQL_IsFieldNull',
+ 'SQL_FetchSize',
+ 'SQL_BindParamInt',
+ 'SQL_BindParamFloat',
+ 'SQL_BindParamString',
+ 'SQL_Execute',
+ 'SQL_LockDatabase',
+ 'SQL_UnlockDatabase',
+ 'SQLTCallback',
+ 'SQL_IsSameConnection',
+ 'SQL_TConnect',
+ 'SQL_TQuery',
+ 'SQL_CreateTransaction',
+ 'SQL_AddQuery',
+ 'SQLTxnSuccess',
+ 'SQLTxnFailure',
+ 'SQL_ExecuteTransaction',
+ 'CloseHandle',
+ 'CloneHandle',
+ 'MenuHandler',
+ 'CreateMenu',
+ 'DisplayMenu',
+ 'DisplayMenuAtItem',
+ 'AddMenuItem',
+ 'InsertMenuItem',
+ 'RemoveMenuItem',
+ 'RemoveAllMenuItems',
+ 'GetMenuItem',
+ 'GetMenuSelectionPosition',
+ 'GetMenuItemCount',
+ 'SetMenuPagination',
+ 'GetMenuPagination',
+ 'GetMenuStyle',
+ 'SetMenuTitle',
+ 'GetMenuTitle',
+ 'CreatePanelFromMenu',
+ 'GetMenuExitButton',
+ 'SetMenuExitButton',
+ 'GetMenuExitBackButton',
+ 'SetMenuExitBackButton',
+ 'SetMenuNoVoteButton',
+ 'CancelMenu',
+ 'GetMenuOptionFlags',
+ 'SetMenuOptionFlags',
+ 'IsVoteInProgress',
+ 'CancelVote',
+ 'VoteMenu',
+ 'VoteMenuToAll',
+ 'VoteHandler',
+ 'SetVoteResultCallback',
+ 'CheckVoteDelay',
+ 'IsClientInVotePool',
+ 'RedrawClientVoteMenu',
+ 'GetMenuStyleHandle',
+ 'CreatePanel',
+ 'CreateMenuEx',
+ 'GetClientMenu',
+ 'CancelClientMenu',
+ 'GetMaxPageItems',
+ 'GetPanelStyle',
+ 'SetPanelTitle',
+ 'DrawPanelItem',
+ 'DrawPanelText',
+ 'CanPanelDrawFlags',
+ 'SetPanelKeys',
+ 'SendPanelToClient',
+ 'GetPanelTextRemaining',
+ 'GetPanelCurrentKey',
+ 'SetPanelCurrentKey',
+ 'RedrawMenuItem',
+ 'InternalShowMenu',
+ 'GetMenuVoteInfo',
+ 'IsNewVoteAllowed',
+ 'PrefetchSound',
+ 'EmitAmbientSound',
+ 'FadeClientVolume',
+ 'StopSound',
+ 'EmitSound',
+ 'EmitSentence',
+ 'GetDistGainFromSoundLevel',
+ 'AmbientSHook',
+ 'NormalSHook',
+ 'AddAmbientSoundHook',
+ 'AddNormalSoundHook',
+ 'RemoveAmbientSoundHook',
+ 'RemoveNormalSoundHook',
+ 'EmitSoundToClient',
+ 'EmitSoundToAll',
+ 'ATTN_TO_SNDLEVEL',
+ 'GetGameSoundParams',
+ 'EmitGameSound',
+ 'EmitAmbientGameSound',
+ 'EmitGameSoundToClient',
+ 'EmitGameSoundToAll',
+ 'PrecacheScriptSound',
+ 'strlen',
+ 'StrContains',
+ 'strcmp',
+ 'strncmp',
+ 'StrEqual',
+ 'strcopy',
+ 'Format',
+ 'FormatEx',
+ 'VFormat',
+ 'StringToInt',
+ 'StringToIntEx',
+ 'IntToString',
+ 'StringToFloat',
+ 'StringToFloatEx',
+ 'FloatToString',
+ 'BreakString',
+ 'TrimString',
+ 'SplitString',
+ 'ReplaceString',
+ 'ReplaceStringEx',
+ 'GetCharBytes',
+ 'IsCharAlpha',
+ 'IsCharNumeric',
+ 'IsCharSpace',
+ 'IsCharMB',
+ 'IsCharUpper',
+ 'IsCharLower',
+ 'StripQuotes',
+ 'CharToUpper',
+ 'CharToLower',
+ 'FindCharInString',
+ 'StrCat',
+ 'ExplodeString',
+ 'ImplodeStrings',
+ 'GetVectorLength',
+ 'GetVectorDistance',
+ 'GetVectorDotProduct',
+ 'GetVectorCrossProduct',
+ 'NormalizeVector',
+ 'GetAngleVectors',
+ 'GetVectorAngles',
+ 'GetVectorVectors',
+ 'AddVectors',
+ 'SubtractVectors',
+ 'ScaleVector',
+ 'NegateVector',
+ 'MakeVectorFromPoints',
+ 'BaseComm_IsClientGagged',
+ 'BaseComm_IsClientMuted',
+ 'BaseComm_SetClientGag',
+ 'BaseComm_SetClientMute',
+ 'FormatUserLogText',
+ 'FindPluginByFile',
+ 'FindTarget',
+ 'AcceptEntityInput',
+ 'SetVariantBool',
+ 'SetVariantString',
+ 'SetVariantInt',
+ 'SetVariantFloat',
+ 'SetVariantVector3D',
+ 'SetVariantPosVector3D',
+ 'SetVariantColor',
+ 'SetVariantEntity',
+ 'GameRules_GetProp',
+ 'GameRules_SetProp',
+ 'GameRules_GetPropFloat',
+ 'GameRules_SetPropFloat',
+ 'GameRules_GetPropEnt',
+ 'GameRules_SetPropEnt',
+ 'GameRules_GetPropVector',
+ 'GameRules_SetPropVector',
+ 'GameRules_GetPropString',
+ 'GameRules_SetPropString',
+ 'GameRules_GetRoundState',
+ 'OnClientConnect',
+ 'OnClientConnected',
+ 'OnClientPutInServer',
+ 'OnClientDisconnect',
+ 'OnClientDisconnect_Post',
+ 'OnClientCommand',
+ 'OnClientSettingsChanged',
+ 'OnClientAuthorized',
+ 'OnClientPreAdminCheck',
+ 'OnClientPostAdminFilter',
+ 'OnClientPostAdminCheck',
+ 'GetMaxClients',
+ 'GetMaxHumanPlayers',
+ 'GetClientCount',
+ 'GetClientName',
+ 'GetClientIP',
+ 'GetClientAuthString',
+ 'GetClientAuthId',
+ 'GetSteamAccountID',
+ 'GetClientUserId',
+ 'IsClientConnected',
+ 'IsClientInGame',
+ 'IsClientInKickQueue',
+ 'IsClientAuthorized',
+ 'IsFakeClient',
+ 'IsClientSourceTV',
+ 'IsClientReplay',
+ 'IsClientObserver',
+ 'IsPlayerAlive',
+ 'GetClientInfo',
+ 'GetClientTeam',
+ 'SetUserAdmin',
+ 'GetUserAdmin',
+ 'AddUserFlags',
+ 'RemoveUserFlags',
+ 'SetUserFlagBits',
+ 'GetUserFlagBits',
+ 'CanUserTarget',
+ 'RunAdminCacheChecks',
+ 'NotifyPostAdminCheck',
+ 'CreateFakeClient',
+ 'SetFakeClientConVar',
+ 'GetClientHealth',
+ 'GetClientModel',
+ 'GetClientWeapon',
+ 'GetClientMaxs',
+ 'GetClientMins',
+ 'GetClientAbsAngles',
+ 'GetClientAbsOrigin',
+ 'GetClientArmor',
+ 'GetClientDeaths',
+ 'GetClientFrags',
+ 'GetClientDataRate',
+ 'IsClientTimingOut',
+ 'GetClientTime',
+ 'GetClientLatency',
+ 'GetClientAvgLatency',
+ 'GetClientAvgLoss',
+ 'GetClientAvgChoke',
+ 'GetClientAvgData',
+ 'GetClientAvgPackets',
+ 'GetClientOfUserId',
+ 'KickClient',
+ 'KickClientEx',
+ 'ChangeClientTeam',
+ 'GetClientSerial',
+ 'GetClientFromSerial',
+ 'FindStringTable',
+ 'GetNumStringTables',
+ 'GetStringTableNumStrings',
+ 'GetStringTableMaxStrings',
+ 'GetStringTableName',
+ 'FindStringIndex',
+ 'ReadStringTable',
+ 'GetStringTableDataLength',
+ 'GetStringTableData',
+ 'SetStringTableData',
+ 'AddToStringTable',
+ 'LockStringTables',
+ 'AddFileToDownloadsTable',
+ 'GetEntityFlags',
+ 'SetEntityFlags',
+ 'GetEntityMoveType',
+ 'SetEntityMoveType',
+ 'GetEntityRenderMode',
+ 'SetEntityRenderMode',
+ 'GetEntityRenderFx',
+ 'SetEntityRenderFx',
+ 'SetEntityRenderColor',
+ 'GetEntityGravity',
+ 'SetEntityGravity',
+ 'SetEntityHealth',
+ 'GetClientButtons',
+ 'EntityOutput',
+ 'HookEntityOutput',
+ 'UnhookEntityOutput',
+ 'HookSingleEntityOutput',
+ 'UnhookSingleEntityOutput',
+ 'SMC_CreateParser',
+ 'SMC_ParseFile',
+ 'SMC_GetErrorString',
+ 'SMC_ParseStart',
+ 'SMC_SetParseStart',
+ 'SMC_ParseEnd',
+ 'SMC_SetParseEnd',
+ 'SMC_NewSection',
+ 'SMC_KeyValue',
+ 'SMC_EndSection',
+ 'SMC_SetReaders',
+ 'SMC_RawLine',
+ 'SMC_SetRawLine',
+ 'BfWriteBool',
+ 'BfWriteByte',
+ 'BfWriteChar',
+ 'BfWriteShort',
+ 'BfWriteWord',
+ 'BfWriteNum',
+ 'BfWriteFloat',
+ 'BfWriteString',
+ 'BfWriteEntity',
+ 'BfWriteAngle',
+ 'BfWriteCoord',
+ 'BfWriteVecCoord',
+ 'BfWriteVecNormal',
+ 'BfWriteAngles',
+ 'BfReadBool',
+ 'BfReadByte',
+ 'BfReadChar',
+ 'BfReadShort',
+ 'BfReadWord',
+ 'BfReadNum',
+ 'BfReadFloat',
+ 'BfReadString',
+ 'BfReadEntity',
+ 'BfReadAngle',
+ 'BfReadCoord',
+ 'BfReadVecCoord',
+ 'BfReadVecNormal',
+ 'BfReadAngles',
+ 'BfGetNumBytesLeft',
+ 'CreateProfiler',
+ 'StartProfiling',
+ 'StopProfiling',
+ 'GetProfilerTime',
+ 'OnPluginStart',
+ 'AskPluginLoad2',
+ 'OnPluginEnd',
+ 'OnPluginPauseChange',
+ 'OnGameFrame',
+ 'OnMapStart',
+ 'OnMapEnd',
+ 'OnConfigsExecuted',
+ 'OnAutoConfigsBuffered',
+ 'OnAllPluginsLoaded',
+ 'GetMyHandle',
+ 'GetPluginIterator',
+ 'MorePlugins',
+ 'ReadPlugin',
+ 'GetPluginStatus',
+ 'GetPluginFilename',
+ 'IsPluginDebugging',
+ 'GetPluginInfo',
+ 'FindPluginByNumber',
+ 'SetFailState',
+ 'ThrowError',
+ 'GetTime',
+ 'FormatTime',
+ 'LoadGameConfigFile',
+ 'GameConfGetOffset',
+ 'GameConfGetKeyValue',
+ 'GameConfGetAddress',
+ 'GetSysTickCount',
+ 'AutoExecConfig',
+ 'RegPluginLibrary',
+ 'LibraryExists',
+ 'GetExtensionFileStatus',
+ 'OnLibraryAdded',
+ 'OnLibraryRemoved',
+ 'ReadMapList',
+ 'SetMapListCompatBind',
+ 'OnClientFloodCheck',
+ 'OnClientFloodResult',
+ 'CanTestFeatures',
+ 'GetFeatureStatus',
+ 'RequireFeature',
+ 'LoadFromAddress',
+ 'StoreToAddress',
+ 'CreateStack',
+ 'PushStackCell',
+ 'PushStackString',
+ 'PushStackArray',
+ 'PopStackCell',
+ 'PopStackString',
+ 'PopStackArray',
+ 'IsStackEmpty',
+ 'PopStack',
+ 'OnPlayerRunCmd',
+ 'BuildPath',
+ 'OpenDirectory',
+ 'ReadDirEntry',
+ 'OpenFile',
+ 'DeleteFile',
+ 'ReadFileLine',
+ 'ReadFile',
+ 'ReadFileString',
+ 'WriteFile',
+ 'WriteFileString',
+ 'WriteFileLine',
+ 'ReadFileCell',
+ 'WriteFileCell',
+ 'IsEndOfFile',
+ 'FileSeek',
+ 'FilePosition',
+ 'FileExists',
+ 'RenameFile',
+ 'DirExists',
+ 'FileSize',
+ 'FlushFile',
+ 'RemoveDir',
+ 'CreateDirectory',
+ 'GetFileTime',
+ 'LogToOpenFile',
+ 'LogToOpenFileEx',
+ 'PbReadInt',
+ 'PbReadFloat',
+ 'PbReadBool',
+ 'PbReadString',
+ 'PbReadColor',
+ 'PbReadAngle',
+ 'PbReadVector',
+ 'PbReadVector2D',
+ 'PbGetRepeatedFieldCount',
+ 'PbSetInt',
+ 'PbSetFloat',
+ 'PbSetBool',
+ 'PbSetString',
+ 'PbSetColor',
+ 'PbSetAngle',
+ 'PbSetVector',
+ 'PbSetVector2D',
+ 'PbAddInt',
+ 'PbAddFloat',
+ 'PbAddBool',
+ 'PbAddString',
+ 'PbAddColor',
+ 'PbAddAngle',
+ 'PbAddVector',
+ 'PbAddVector2D',
+ 'PbRemoveRepeatedFieldValue',
+ 'PbReadMessage',
+ 'PbReadRepeatedMessage',
+ 'PbAddMessage',
+ 'SetNextMap',
+ 'GetNextMap',
+ 'ForceChangeLevel',
+ 'GetMapHistorySize',
+ 'GetMapHistory',
+ 'GeoipCode2',
+ 'GeoipCode3',
+ 'GeoipCountry',
+ 'MarkNativeAsOptional',
+ 'RegClientCookie',
+ 'FindClientCookie',
+ 'SetClientCookie',
+ 'GetClientCookie',
+ 'SetAuthIdCookie',
+ 'AreClientCookiesCached',
+ 'OnClientCookiesCached',
+ 'CookieMenuHandler',
+ 'SetCookiePrefabMenu',
+ 'SetCookieMenuItem',
+ 'ShowCookieMenu',
+ 'GetCookieIterator',
+ 'ReadCookieIterator',
+ 'GetCookieAccess',
+ 'GetClientCookieTime',
+ 'LoadTranslations',
+ 'SetGlobalTransTarget',
+ 'GetClientLanguage',
+ 'GetServerLanguage',
+ 'GetLanguageCount',
+ 'GetLanguageInfo',
+ 'SetClientLanguage',
+ 'GetLanguageByCode',
+ 'GetLanguageByName',
+ 'CS_OnBuyCommand',
+ 'CS_OnCSWeaponDrop',
+ 'CS_OnGetWeaponPrice',
+ 'CS_OnTerminateRound',
+ 'CS_RespawnPlayer',
+ 'CS_SwitchTeam',
+ 'CS_DropWeapon',
+ 'CS_TerminateRound',
+ 'CS_GetTranslatedWeaponAlias',
+ 'CS_GetWeaponPrice',
+ 'CS_GetClientClanTag',
+ 'CS_SetClientClanTag',
+ 'CS_GetTeamScore',
+ 'CS_SetTeamScore',
+ 'CS_GetMVPCount',
+ 'CS_SetMVPCount',
+ 'CS_GetClientContributionScore',
+ 'CS_SetClientContributionScore',
+ 'CS_GetClientAssists',
+ 'CS_SetClientAssists',
+ 'CS_AliasToWeaponID',
+ 'CS_WeaponIDToAlias',
+ 'CS_IsValidWeaponID',
+ 'CS_UpdateClientModel',
+ 'LogToGame',
+ 'SetRandomSeed',
+ 'GetRandomFloat',
+ 'GetRandomInt',
+ 'IsMapValid',
+ 'IsDedicatedServer',
+ 'GetEngineTime',
+ 'GetGameTime',
+ 'GetGameTickCount',
+ 'GetGameDescription',
+ 'GetGameFolderName',
+ 'GetCurrentMap',
+ 'PrecacheModel',
+ 'PrecacheSentenceFile',
+ 'PrecacheDecal',
+ 'PrecacheGeneric',
+ 'IsModelPrecached',
+ 'IsDecalPrecached',
+ 'IsGenericPrecached',
+ 'PrecacheSound',
+ 'IsSoundPrecached',
+ 'CreateDialog',
+ 'GetEngineVersion',
+ 'PrintToChat',
+ 'PrintToChatAll',
+ 'PrintCenterText',
+ 'PrintCenterTextAll',
+ 'PrintHintText',
+ 'PrintHintTextToAll',
+ 'ShowVGUIPanel',
+ 'CreateHudSynchronizer',
+ 'SetHudTextParams',
+ 'SetHudTextParamsEx',
+ 'ShowSyncHudText',
+ 'ClearSyncHud',
+ 'ShowHudText',
+ 'ShowMOTDPanel',
+ 'DisplayAskConnectBox',
+ 'EntIndexToEntRef',
+ 'EntRefToEntIndex',
+ 'MakeCompatEntRef',
+ 'SetClientViewEntity',
+ 'SetLightStyle',
+ 'GetClientEyePosition',
+ 'CreateDataPack',
+ 'WritePackCell',
+ 'WritePackFloat',
+ 'WritePackString',
+ 'ReadPackCell',
+ 'ReadPackFloat',
+ 'ReadPackString',
+ 'ResetPack',
+ 'GetPackPosition',
+ 'SetPackPosition',
+ 'IsPackReadable',
+ 'LogMessage',
+ 'LogToFile',
+ 'LogToFileEx',
+ 'LogAction',
+ 'LogError',
+ 'OnLogAction',
+ 'GameLogHook',
+ 'AddGameLogHook',
+ 'RemoveGameLogHook',
+ 'FindTeamByName',
+ 'StartPrepSDKCall',
+ 'PrepSDKCall_SetVirtual',
+ 'PrepSDKCall_SetSignature',
+ 'PrepSDKCall_SetAddress',
+ 'PrepSDKCall_SetFromConf',
+ 'PrepSDKCall_SetReturnInfo',
+ 'PrepSDKCall_AddParameter',
+ 'EndPrepSDKCall',
+ 'SDKCall',
+ 'GetPlayerResourceEntity',
+)
+
+
if __name__ == '__main__':  # pragma: no cover
    import re
    from urllib.request import Request, urlopen

    from pygments.util import format_lines

    # FancyURLopener is deprecated since Python 3.3 and removed in 3.14;
    # send the custom User-Agent via a plain Request instead.
    USER_AGENT = 'Mozilla/5.0 (Pygments Sourcemod Builtins Update)'

    def open_url(url):
        """Open *url*, sending the custom User-Agent header."""
        return urlopen(Request(url, headers={'User-Agent': USER_AGENT}))

    def get_version():
        """Scrape the current SourceMod version string from the API index page.

        Raises ValueError if no version marker is found in the page.
        """
        f = open_url('http://docs.sourcemod.net/api/index.php')
        r = re.compile(r'SourceMod v\.<b>([\d\.]+(?:-\w+)?)</td>')
        for line in f:
            m = r.search(line.decode())
            if m is not None:
                return m.groups()[0]
        raise ValueError('No version in api docs')

    def get_sm_functions():
        """Return the list of SourceMod native function names from SMfuncs.js."""
        f = open_url('http://docs.sourcemod.net/api/SMfuncs.js')
        r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
        functions = []
        for line in f:
            m = r.match(line.decode())
            if m is not None:
                functions.append(m.groups()[0])
        return functions

    def regenerate(filename, natives):
        """Rewrite *filename* in place, replacing its FUNCTIONS tuple with *natives*."""
        with open(filename) as fp:
            content = fp.read()

        # Keep everything before the tuple and everything from this
        # __main__ guard onward; only the tuple itself is regenerated.
        header = content[:content.find('FUNCTIONS = (')]
        footer = content[content.find("if __name__ == '__main__':") - 1:]

        with open(filename, 'w') as fp:
            fp.write(header)
            fp.write(format_lines('FUNCTIONS', natives))
            fp.write('\n\n' + footer)

    def run():
        """Download the current function index and regenerate this module."""
        version = get_version()
        print('> Downloading function index for SourceMod %s' % version)
        functions = get_sm_functions()
        print('> %d functions found:' % len(functions))

        functionlist = []
        for full_function_name in functions:
            print('>> %s' % full_function_name)
            functionlist.append(full_function_name)

        regenerate(__file__, functionlist)

    run()
diff --git a/pygments/lexers/_stan_builtins.py b/pygments/lexers/_stan_builtins.py
new file mode 100644
index 0000000..2ab1c87
--- /dev/null
+++ b/pygments/lexers/_stan_builtins.py
@@ -0,0 +1,648 @@
+"""
+ pygments.lexers._stan_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file contains the names of functions for Stan used by
+    ``pygments.lexers.math.StanLexer``. This is for Stan language version 2.29.0.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
# Control-flow and built-in statement keywords of the Stan language.
KEYWORDS = (
    'break',
    'continue',
    'else',
    'for',
    'if',
    'in',
    'print',
    'reject',
    'return',
    'while',
)
+
# Data types that may appear in Stan variable declarations.
TYPES = (
    'cholesky_factor_corr',
    'cholesky_factor_cov',
    'corr_matrix',
    'cov_matrix',
    'int',
    'matrix',
    'ordered',
    'positive_ordered',
    'real',
    'row_vector',
    'simplex',
    'unit_vector',
    'vector',
    'void',
    'array',
    'complex',  # trailing comma added: consistent with the other tuples and
                # guards against accidental implicit string concatenation
)
+
+FUNCTIONS = (
+ 'abs',
+ 'acos',
+ 'acosh',
+ 'add_diag',
+ 'algebra_solver',
+ 'algebra_solver_newton',
+ 'append_array',
+ 'append_col',
+ 'append_row',
+ 'arg',
+ 'asin',
+ 'asinh',
+ 'atan',
+ 'atan2',
+ 'atanh',
+ 'bernoulli_cdf',
+ 'bernoulli_lccdf',
+ 'bernoulli_lcdf',
+ 'bernoulli_logit_glm_lpmf',
+ 'bernoulli_logit_glm_lupmf',
+ 'bernoulli_logit_glm_rng',
+ 'bernoulli_logit_lpmf',
+ 'bernoulli_logit_lupmf',
+ 'bernoulli_logit_rng',
+ 'bernoulli_lpmf',
+ 'bernoulli_lupmf',
+ 'bernoulli_rng',
+ 'bessel_first_kind',
+ 'bessel_second_kind',
+ 'beta',
+ 'beta_binomial_cdf',
+ 'beta_binomial_lccdf',
+ 'beta_binomial_lcdf',
+ 'beta_binomial_lpmf',
+ 'beta_binomial_lupmf',
+ 'beta_binomial_rng',
+ 'beta_cdf',
+ 'beta_lccdf',
+ 'beta_lcdf',
+ 'beta_lpdf',
+ 'beta_lupdf',
+ 'beta_proportion_lccdf',
+ 'beta_proportion_lcdf',
+ 'beta_proportion_rng',
+ 'beta_rng',
+ 'binary_log_loss',
+ 'binomial_cdf',
+ 'binomial_coefficient_log',
+ 'binomial_lccdf',
+ 'binomial_lcdf',
+ 'binomial_logit_lpmf',
+ 'binomial_logit_lupmf',
+ 'binomial_lpmf',
+ 'binomial_lupmf',
+ 'binomial_rng',
+ 'block',
+ 'categorical_logit_glm_lpmf',
+ 'categorical_logit_glm_lupmf',
+ 'categorical_logit_lpmf',
+ 'categorical_logit_lupmf',
+ 'categorical_logit_rng',
+ 'categorical_lpmf',
+ 'categorical_lupmf',
+ 'categorical_rng',
+ 'cauchy_cdf',
+ 'cauchy_lccdf',
+ 'cauchy_lcdf',
+ 'cauchy_lpdf',
+ 'cauchy_lupdf',
+ 'cauchy_rng',
+ 'cbrt',
+ 'ceil',
+ 'chi_square_cdf',
+ 'chi_square_lccdf',
+ 'chi_square_lcdf',
+ 'chi_square_lpdf',
+ 'chi_square_lupdf',
+ 'chi_square_rng',
+ 'chol2inv',
+ 'cholesky_decompose',
+ 'choose',
+ 'col',
+ 'cols',
+ 'columns_dot_product',
+ 'columns_dot_self',
+ 'conj',
+ 'cos',
+ 'cosh',
+ 'cov_exp_quad',
+ 'crossprod',
+ 'csr_extract_u',
+ 'csr_extract_v',
+ 'csr_extract_w',
+ 'csr_matrix_times_vector',
+ 'csr_to_dense_matrix',
+ 'cumulative_sum',
+ 'dae',
+ 'dae_tol',
+ 'determinant',
+ 'diag_matrix',
+ 'diag_post_multiply',
+ 'diag_pre_multiply',
+ 'diagonal',
+ 'digamma',
+ 'dims',
+ 'dirichlet_lpdf',
+ 'dirichlet_lupdf',
+ 'dirichlet_rng',
+ 'discrete_range_cdf',
+ 'discrete_range_lccdf',
+ 'discrete_range_lcdf',
+ 'discrete_range_lpmf',
+ 'discrete_range_lupmf',
+ 'discrete_range_rng',
+ 'distance',
+ 'dot_product',
+ 'dot_self',
+ 'double_exponential_cdf',
+ 'double_exponential_lccdf',
+ 'double_exponential_lcdf',
+ 'double_exponential_lpdf',
+ 'double_exponential_lupdf',
+ 'double_exponential_rng',
+ 'e',
+ 'eigenvalues_sym',
+ 'eigenvectors_sym',
+ 'erf',
+ 'erfc',
+ 'exp',
+ 'exp2',
+ 'exp_mod_normal_cdf',
+ 'exp_mod_normal_lccdf',
+ 'exp_mod_normal_lcdf',
+ 'exp_mod_normal_lpdf',
+ 'exp_mod_normal_lupdf',
+ 'exp_mod_normal_rng',
+ 'expm1',
+ 'exponential_cdf',
+ 'exponential_lccdf',
+ 'exponential_lcdf',
+ 'exponential_lpdf',
+ 'exponential_lupdf',
+ 'exponential_rng',
+ 'fabs',
+ 'falling_factorial',
+ 'fdim',
+ 'floor',
+ 'fma',
+ 'fmax',
+ 'fmin',
+ 'fmod',
+ 'frechet_cdf',
+ 'frechet_lccdf',
+ 'frechet_lcdf',
+ 'frechet_lpdf',
+ 'frechet_lupdf',
+ 'frechet_rng',
+ 'gamma_cdf',
+ 'gamma_lccdf',
+ 'gamma_lcdf',
+ 'gamma_lpdf',
+ 'gamma_lupdf',
+ 'gamma_p',
+ 'gamma_q',
+ 'gamma_rng',
+ 'gaussian_dlm_obs_lpdf',
+ 'gaussian_dlm_obs_lupdf',
+ 'generalized_inverse',
+ 'get_imag',
+ 'get_lp',
+ 'get_real',
+ 'gumbel_cdf',
+ 'gumbel_lccdf',
+ 'gumbel_lcdf',
+ 'gumbel_lpdf',
+ 'gumbel_lupdf',
+ 'gumbel_rng',
+ 'head',
+ 'hmm_hidden_state_prob',
+ 'hmm_latent_rng',
+ 'hmm_marginal',
+ 'hypergeometric_lpmf',
+ 'hypergeometric_lupmf',
+ 'hypergeometric_rng',
+ 'hypot',
+ 'identity_matrix',
+ 'inc_beta',
+ 'int_step',
+ 'integrate_1d',
+ 'integrate_ode',
+ 'integrate_ode_adams',
+ 'integrate_ode_bdf',
+ 'integrate_ode_rk45',
+ 'inv',
+ 'inv_chi_square_cdf',
+ 'inv_chi_square_lccdf',
+ 'inv_chi_square_lcdf',
+ 'inv_chi_square_lpdf',
+ 'inv_chi_square_lupdf',
+ 'inv_chi_square_rng',
+ 'inv_cloglog',
+ 'inv_erfc',
+ 'inv_gamma_cdf',
+ 'inv_gamma_lccdf',
+ 'inv_gamma_lcdf',
+ 'inv_gamma_lpdf',
+ 'inv_gamma_lupdf',
+ 'inv_gamma_rng',
+ 'inv_logit',
+ 'inv_Phi',
+ 'inv_sqrt',
+ 'inv_square',
+ 'inv_wishart_lpdf',
+ 'inv_wishart_lupdf',
+ 'inv_wishart_rng',
+ 'inverse',
+ 'inverse_spd',
+ 'is_inf',
+ 'is_nan',
+ 'lambert_w0',
+ 'lambert_wm1',
+ 'lbeta',
+ 'lchoose',
+ 'ldexp',
+ 'lgamma',
+ 'linspaced_array',
+ 'linspaced_int_array',
+ 'linspaced_row_vector',
+ 'linspaced_vector',
+ 'lkj_corr_cholesky_lpdf',
+ 'lkj_corr_cholesky_lupdf',
+ 'lkj_corr_cholesky_rng',
+ 'lkj_corr_lpdf',
+ 'lkj_corr_lupdf',
+ 'lkj_corr_rng',
+ 'lmgamma',
+ 'lmultiply',
+ 'log',
+ 'log10',
+ 'log1m',
+ 'log1m_exp',
+ 'log1m_inv_logit',
+ 'log1p',
+ 'log1p_exp',
+ 'log2',
+ 'log_determinant',
+ 'log_diff_exp',
+ 'log_falling_factorial',
+ 'log_inv_logit',
+ 'log_inv_logit_diff',
+ 'log_mix',
+ 'log_modified_bessel_first_kind',
+ 'log_rising_factorial',
+ 'log_softmax',
+ 'log_sum_exp',
+ 'logistic_cdf',
+ 'logistic_lccdf',
+ 'logistic_lcdf',
+ 'logistic_lpdf',
+ 'logistic_lupdf',
+ 'logistic_rng',
+ 'logit',
+ 'loglogistic_cdf',
+ 'loglogistic_lpdf',
+ 'loglogistic_rng',
+ 'lognormal_cdf',
+ 'lognormal_lccdf',
+ 'lognormal_lcdf',
+ 'lognormal_lpdf',
+ 'lognormal_lupdf',
+ 'lognormal_rng',
+ 'machine_precision',
+ 'map_rect',
+ 'matrix_exp',
+ 'matrix_exp_multiply',
+ 'matrix_power',
+ 'max',
+ 'mdivide_left_spd',
+ 'mdivide_left_tri_low',
+ 'mdivide_right_spd',
+ 'mdivide_right_tri_low',
+ 'mean',
+ 'min',
+ 'modified_bessel_first_kind',
+ 'modified_bessel_second_kind',
+ 'multi_gp_cholesky_lpdf',
+ 'multi_gp_cholesky_lupdf',
+ 'multi_gp_lpdf',
+ 'multi_gp_lupdf',
+ 'multi_normal_cholesky_lpdf',
+ 'multi_normal_cholesky_lupdf',
+ 'multi_normal_cholesky_rng',
+ 'multi_normal_lpdf',
+ 'multi_normal_lupdf',
+ 'multi_normal_prec_lpdf',
+ 'multi_normal_prec_lupdf',
+ 'multi_normal_rng',
+ 'multi_student_t_lpdf',
+ 'multi_student_t_lupdf',
+ 'multi_student_t_rng',
+ 'multinomial_logit_lpmf',
+ 'multinomial_logit_lupmf',
+ 'multinomial_logit_rng',
+ 'multinomial_lpmf',
+ 'multinomial_lupmf',
+ 'multinomial_rng',
+ 'multiply_log',
+ 'multiply_lower_tri_self_transpose',
+ 'neg_binomial_2_cdf',
+ 'neg_binomial_2_lccdf',
+ 'neg_binomial_2_lcdf',
+ 'neg_binomial_2_log_glm_lpmf',
+ 'neg_binomial_2_log_glm_lupmf',
+ 'neg_binomial_2_log_lpmf',
+ 'neg_binomial_2_log_lupmf',
+ 'neg_binomial_2_log_rng',
+ 'neg_binomial_2_lpmf',
+ 'neg_binomial_2_lupmf',
+ 'neg_binomial_2_rng',
+ 'neg_binomial_cdf',
+ 'neg_binomial_lccdf',
+ 'neg_binomial_lcdf',
+ 'neg_binomial_lpmf',
+ 'neg_binomial_lupmf',
+ 'neg_binomial_rng',
+ 'negative_infinity',
+ 'norm',
+ 'normal_cdf',
+ 'normal_id_glm_lpdf',
+ 'normal_id_glm_lupdf',
+ 'normal_lccdf',
+ 'normal_lcdf',
+ 'normal_lpdf',
+ 'normal_lupdf',
+ 'normal_rng',
+ 'not_a_number',
+ 'num_elements',
+ 'ode_adams',
+ 'ode_adams_tol',
+ 'ode_adjoint_tol_ctl',
+ 'ode_bdf',
+ 'ode_bdf_tol',
+ 'ode_ckrk',
+ 'ode_ckrk_tol',
+ 'ode_rk45',
+ 'ode_rk45_tol',
+ 'one_hot_array',
+ 'one_hot_int_array',
+ 'one_hot_row_vector',
+ 'one_hot_vector',
+ 'ones_array',
+ 'ones_int_array',
+ 'ones_row_vector',
+ 'ones_vector',
+ 'ordered_logistic_glm_lpmf',
+ 'ordered_logistic_glm_lupmf',
+ 'ordered_logistic_lpmf',
+ 'ordered_logistic_lupmf',
+ 'ordered_logistic_rng',
+ 'ordered_probit_lpmf',
+ 'ordered_probit_lupmf',
+ 'ordered_probit_rng',
+ 'owens_t',
+ 'pareto_cdf',
+ 'pareto_lccdf',
+ 'pareto_lcdf',
+ 'pareto_lpdf',
+ 'pareto_lupdf',
+ 'pareto_rng',
+ 'pareto_type_2_cdf',
+ 'pareto_type_2_lccdf',
+ 'pareto_type_2_lcdf',
+ 'pareto_type_2_lpdf',
+ 'pareto_type_2_lupdf',
+ 'pareto_type_2_rng',
+ 'Phi',
+ 'Phi_approx',
+ 'pi',
+ 'poisson_cdf',
+ 'poisson_lccdf',
+ 'poisson_lcdf',
+ 'poisson_log_glm_lpmf',
+ 'poisson_log_glm_lupmf',
+ 'poisson_log_lpmf',
+ 'poisson_log_lupmf',
+ 'poisson_log_rng',
+ 'poisson_lpmf',
+ 'poisson_lupmf',
+ 'poisson_rng',
+ 'polar',
+ 'positive_infinity',
+ 'pow',
+ 'print',
+ 'prod',
+ 'proj',
+ 'qr_Q',
+ 'qr_R',
+ 'qr_thin_Q',
+ 'qr_thin_R',
+ 'quad_form',
+ 'quad_form_diag',
+ 'quad_form_sym',
+ 'quantile',
+ 'rank',
+ 'rayleigh_cdf',
+ 'rayleigh_lccdf',
+ 'rayleigh_lcdf',
+ 'rayleigh_lpdf',
+ 'rayleigh_lupdf',
+ 'rayleigh_rng',
+ 'reduce_sum',
+ 'reject',
+ 'rep_array',
+ 'rep_matrix',
+ 'rep_row_vector',
+ 'rep_vector',
+ 'reverse',
+ 'rising_factorial',
+ 'round',
+ 'row',
+ 'rows',
+ 'rows_dot_product',
+ 'rows_dot_self',
+ 'scale_matrix_exp_multiply',
+ 'scaled_inv_chi_square_cdf',
+ 'scaled_inv_chi_square_lccdf',
+ 'scaled_inv_chi_square_lcdf',
+ 'scaled_inv_chi_square_lpdf',
+ 'scaled_inv_chi_square_lupdf',
+ 'scaled_inv_chi_square_rng',
+ 'sd',
+ 'segment',
+ 'sin',
+ 'singular_values',
+ 'sinh',
+ 'size',
+ 'skew_double_exponential_cdf',
+ 'skew_double_exponential_lccdf',
+ 'skew_double_exponential_lcdf',
+ 'skew_double_exponential_lpdf',
+ 'skew_double_exponential_lupdf',
+ 'skew_double_exponential_rng',
+ 'skew_normal_cdf',
+ 'skew_normal_lccdf',
+ 'skew_normal_lcdf',
+ 'skew_normal_lpdf',
+ 'skew_normal_lupdf',
+ 'skew_normal_rng',
+ 'softmax',
+ 'sort_asc',
+ 'sort_desc',
+ 'sort_indices_asc',
+ 'sort_indices_desc',
+ 'sqrt',
+ 'sqrt2',
+ 'square',
+ 'squared_distance',
+ 'std_normal_cdf',
+ 'std_normal_lccdf',
+ 'std_normal_lcdf',
+ 'std_normal_lpdf',
+ 'std_normal_lupdf',
+ 'std_normal_rng',
+ 'step',
+ 'student_t_cdf',
+ 'student_t_lccdf',
+ 'student_t_lcdf',
+ 'student_t_lpdf',
+ 'student_t_lupdf',
+ 'student_t_rng',
+ 'sub_col',
+ 'sub_row',
+ 'sum',
+ 'svd_U',
+ 'svd_V',
+ 'symmetrize_from_lower_tri',
+ 'tail',
+ 'tan',
+ 'tanh',
+ 'target',
+ 'tcrossprod',
+ 'tgamma',
+ 'to_array_1d',
+ 'to_array_2d',
+ 'to_complex',
+ 'to_matrix',
+ 'to_row_vector',
+ 'to_vector',
+ 'trace',
+ 'trace_gen_quad_form',
+ 'trace_quad_form',
+ 'trigamma',
+ 'trunc',
+ 'uniform_cdf',
+ 'uniform_lccdf',
+ 'uniform_lcdf',
+ 'uniform_lpdf',
+ 'uniform_lupdf',
+ 'uniform_rng',
+ 'uniform_simplex',
+ 'variance',
+ 'von_mises_cdf',
+ 'von_mises_lccdf',
+ 'von_mises_lcdf',
+ 'von_mises_lpdf',
+ 'von_mises_lupdf',
+ 'von_mises_rng',
+ 'weibull_cdf',
+ 'weibull_lccdf',
+ 'weibull_lcdf',
+ 'weibull_lpdf',
+ 'weibull_lupdf',
+ 'weibull_rng',
+ 'wiener_lpdf',
+ 'wiener_lupdf',
+ 'wishart_lpdf',
+ 'wishart_lupdf',
+ 'wishart_rng',
+ 'zeros_array',
+ 'zeros_int_array',
+ 'zeros_row_vector'
+)
+
# Distribution names recognized after Stan's sampling ("~") statement.
DISTRIBUTIONS = (
    'bernoulli',
    'bernoulli_logit',
    'bernoulli_logit_glm',
    'beta',
    'beta_binomial',
    'binomial',
    'binomial_logit',
    'categorical',
    'categorical_logit',
    'categorical_logit_glm',
    'cauchy',
    'chi_square',
    'dirichlet',
    'discrete_range',
    'double_exponential',
    'exp_mod_normal',
    'exponential',
    'frechet',
    'gamma',
    'gaussian_dlm_obs',
    'gumbel',
    'hypergeometric',
    'inv_chi_square',
    'inv_gamma',
    'inv_wishart',
    'lkj_corr',
    'lkj_corr_cholesky',
    'logistic',
    'loglogistic',
    'lognormal',
    'multi_gp',
    'multi_gp_cholesky',
    'multi_normal',
    'multi_normal_cholesky',
    'multi_normal_prec',
    'multi_student_t',
    'multinomial',
    'multinomial_logit',
    'neg_binomial',
    'neg_binomial_2',
    'neg_binomial_2_log',
    'neg_binomial_2_log_glm',
    'normal',
    'normal_id_glm',
    'ordered_logistic',
    'ordered_logistic_glm',
    'ordered_probit',
    'pareto',
    'pareto_type_2',
    'poisson',
    'poisson_log',
    'poisson_log_glm',
    'rayleigh',
    'scaled_inv_chi_square',
    'skew_double_exponential',
    'skew_normal',
    'std_normal',
    'student_t',
    'uniform',
    'von_mises',
    'weibull',
    'wiener',
    'wishart',
)
+
# Words reserved by Stan (for future use or inherited from the C++
# target language); they may not be used as identifiers.
# Note: 'var' appeared twice in the original list; the duplicate entry
# has been removed — membership tests are unaffected.
RESERVED = (
    'repeat',
    'until',
    'then',
    'true',
    'false',
    'var',
    'struct',
    'typedef',
    'export',
    'auto',
    'extern',
    'static',
)
diff --git a/pygments/lexers/_stata_builtins.py b/pygments/lexers/_stata_builtins.py
new file mode 100644
index 0000000..1343212
--- /dev/null
+++ b/pygments/lexers/_stata_builtins.py
@@ -0,0 +1,457 @@
+"""
+ pygments.lexers._stata_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builtins for Stata
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
# Stata keywords highlighted specially by the lexer: command qualifiers
# (``if``/``in``/``using``/``by``) plus ``replace`` and the ``generate``
# abbreviations.
builtins_special = (
    "if", "in", "using", "replace", "by", "gen", "generate"
)
+
+builtins_base = (
+ "if", "else", "in", "foreach", "for", "forv", "forva",
+ "forval", "forvalu", "forvalue", "forvalues", "by", "bys",
+ "bysort", "quietly", "qui", "about", "ac",
+ "ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath",
+ "adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova",
+ "anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app",
+ "appe", "appen", "append", "arch", "arch_dr", "arch_estat",
+ "arch_p", "archlm", "areg", "areg_p", "args", "arima",
+ "arima_dr", "arima_estat", "arima_p", "as", "asmprobit",
+ "asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg",
+ "asmprobit_p", "ass", "asse", "asser", "assert", "avplot",
+ "avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey",
+ "binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf",
+ "bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit",
+ "bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8",
+ "boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p",
+ "bprobit", "br", "break", "brier", "bro", "brow", "brows",
+ "browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w",
+ "bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8",
+ "bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot",
+ "camat", "canon", "canon_8", "canon_8_p", "canon_estat",
+ "canon_p", "cap", "caprojection", "capt", "captu", "captur",
+ "capture", "cat", "cc", "cchart", "cchart_7", "cci",
+ "cd", "censobs_table", "centile", "cf", "char", "chdir",
+ "checkdlgfiles", "checkestimationsample", "checkhlpfiles",
+ "checksum", "chelp", "ci", "cii", "cl", "class", "classutil",
+ "clear", "cli", "clis", "clist", "clo", "clog", "clog_lf",
+ "clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf",
+ "clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar",
+ "clslistarray", "cluster", "cluster_measures", "cluster_stop",
+ "cluster_tree", "cluster_tree_8", "clustermat", "cmdlog",
+ "cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg",
+ "codebook", "collaps4", "collapse", "colormult_nb",
+ "colormult_nw", "compare", "compress", "conf", "confi",
+ "confir", "confirm", "conren", "cons", "const", "constr",
+ "constra", "constrai", "constrain", "constraint", "continue",
+ "contract", "copy", "copyright", "copysource", "cor", "corc",
+ "corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc",
+ "corre", "correl", "correla", "correlat", "correlate",
+ "corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw",
+ "coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7",
+ "crc", "cret", "cretu", "cretur", "creturn", "cross", "cs",
+ "cscript", "cscript_log", "csi", "ct", "ct_is", "ctset",
+ "ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul",
+ "cusum", "cusum_7", "cutil", "d", "datasig", "datasign",
+ "datasigna", "datasignat", "datasignatu", "datasignatur",
+ "datasignature", "datetof", "db", "dbeta", "de", "dec",
+ "deco", "decod", "decode", "deff", "des", "desc", "descr",
+ "descri", "describ", "describe", "destring", "dfbeta",
+ "dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis",
+ "discard", "disp", "disp_res", "disp_s", "displ", "displa",
+ "display", "distinct", "do", "doe", "doed", "doedi",
+ "doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
+ "drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
+ "dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
+ "eivreg", "emdef", "end", "en", "enc", "enco", "encod", "encode",
+ "eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
+ "ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
+ "ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
+ "eretu", "eretur", "ereturn", "err", "erro", "error", "est",
+ "est_cfexist", "est_cfname", "est_clickable", "est_expand",
+ "est_hold", "est_table", "est_unhold", "est_unholdok",
+ "estat", "estat_default", "estat_summ", "estat_vce_only",
+ "esti", "estimates", "etodow", "etof", "etomdy", "ex",
+ "exi", "exit", "expand", "expandcl", "fac", "fact", "facto",
+ "factor", "factor_estat", "factor_p", "factor_pca_rotated",
+ "factor_rotate", "factormat", "fcast", "fcast_compute",
+ "fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri",
+ "fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause",
+ "fh_st", "open", "read", "close",
+ "file", "filefilter", "fillin", "find_hlp_file", "findfile",
+ "findit", "findit_7", "fit", "fl", "fli", "flis", "flist",
+ "for5_0", "form", "forma", "format", "fpredict", "frac_154",
+ "frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis",
+ "frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq",
+ "frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot",
+ "fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn",
+ "fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe",
+ "ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp",
+ "gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p",
+ "gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge",
+ "gen", "gene", "gener", "genera", "generat", "generate",
+ "genrank", "genstd", "genvmean", "gettoken", "gl", "gladder",
+ "gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04",
+ "glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09",
+ "glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu",
+ "glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1",
+ "glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6",
+ "glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo",
+ "glob", "globa", "global", "glogit", "glogit_8", "glogit_p",
+ "gmeans", "gnbre_lf", "gnbreg", "gnbreg_5", "gnbreg_p",
+ "gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet",
+ "gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf",
+ "gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen",
+ "gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr",
+ "gr7", "gr_copy", "gr_current", "gr_db", "gr_describe",
+ "gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit",
+ "gr_editviewopts", "gr_example", "gr_example2", "gr_export",
+ "gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename",
+ "gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table",
+ "gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen",
+ "greigen_7", "greigen_8", "grmeanby", "grmeanby_7",
+ "gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat",
+ "gsort", "gwood", "h", "hadimvo", "hareg", "hausman",
+ "haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf",
+ "heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf",
+ "hetpr_p", "hetprob", "hettest", "hexdump", "hilite",
+ "hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans",
+ "hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9",
+ "icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase",
+ "include", "inf", "infi", "infil", "infile", "infix", "inp",
+ "inpu", "input", "ins", "insheet", "insp", "inspe",
+ "inspec", "inspect", "integ", "inten", "intreg", "intreg_7",
+ "intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate",
+ "iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy",
+ "is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf",
+ "ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote",
+ "ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife",
+ "jacknife", "jknife", "jknife_6", "jknife_8", "jkstat",
+ "joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa",
+ "kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov",
+ "ktau", "kwallis", "l", "la", "lab", "labe", "label",
+ "labelbook", "ladder", "levels", "levelsof", "leverage",
+ "lfit", "lfit_p", "li", "lincom", "line", "linktest",
+ "lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp",
+ "lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw",
+ "llogis_p", "llogist", "llogistic", "llogistichet",
+ "lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet",
+ "lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp",
+ "lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0",
+ "loadingplot", "loc", "loca", "local", "log", "logi",
+ "logis_lf", "logistic", "logistic_p", "logit", "logit_estat",
+ "logit_p", "loglogs", "logrank", "loneway", "lookfor",
+ "lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc",
+ "lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x",
+ "lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot",
+ "lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns",
+ "man", "manova", "manova_estat", "manova_p", "manovatest",
+ "mantel", "mark", "markin", "markout", "marksample", "mat",
+ "mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata",
+ "mata_clear", "mata_describe", "mata_drop", "mata_matdescribe",
+ "mata_matsave", "mata_matuse", "mata_memory", "mata_mlib",
+ "mata_mosave", "mata_rename", "mata_which", "matalabel",
+ "matcproc", "matlist", "matname", "matr", "matri",
+ "matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci",
+ "md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds",
+ "mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat",
+ "mdsshepard", "mdytoe", "mdytof", "me_derd", "mean",
+ "means", "median", "memory", "memsize", "meqparse", "mer",
+ "merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound",
+ "mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir",
+ "mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs",
+ "ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug",
+ "ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp",
+ "ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle",
+ "ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0",
+ "ml_ebfr1", "ml_ebh0q", "ml_ebhh0", "ml_ebhr0", "ml_ebr0i",
+ "ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i",
+ "ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs",
+ "ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi",
+ "ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs",
+ "ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle",
+ "ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote",
+ "ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0",
+ "ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max",
+ "ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt",
+ "ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor",
+ "ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold",
+ "mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi",
+ "mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum",
+ "mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit",
+ "mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode",
+ "mvencode", "mvreg", "mvreg_estat", "n", "nbreg",
+ "nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net",
+ "newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9",
+ "nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2",
+ "nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7",
+ "nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3",
+ "nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit",
+ "nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no",
+ "nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note",
+ "notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc",
+ "old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit",
+ "ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway",
+ "op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr",
+ "opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit",
+ "oprobitp", "opts_exclusive", "order", "orthog", "orthpoly",
+ "ou", "out", "outf", "outfi", "outfil", "outfile", "outs",
+ "outsh", "outshe", "outshee", "outsheet", "ovtest", "pac",
+ "pac_7", "palette", "parse", "parse_dissim", "pause", "pca",
+ "pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate",
+ "pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr",
+ "pctile", "pentium", "pergram", "pergram_7", "permute",
+ "permute_8", "personal", "peto_st", "pkcollapse", "pkcross",
+ "pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm",
+ "pksumm_7", "pl", "plo", "plot", "plugin", "pnorm",
+ "pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p",
+ "poisson", "poisson_estat", "post", "postclose", "postfile",
+ "postutil", "pperron", "pr", "prais", "prais_e", "prais_e2",
+ "prais_p", "predict", "predictnl", "preserve", "print",
+ "pro", "prob", "probi", "probit", "probit_estat", "probit_p",
+ "proc_time", "procoverlay", "procrustes", "procrustes_estat",
+ "procrustes_p", "profiler", "prog", "progr", "progra",
+ "program", "prop", "proportion", "prtest", "prtesti", "pwcorr",
+ "pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder",
+ "qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg",
+ "qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile",
+ "quantile_7", "que", "quer", "query", "range", "ranksum",
+ "ratio", "rchart", "rchart_7", "rcof", "recast", "reclink",
+ "recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre",
+ "regre_p2", "regres", "regres_p", "regress", "regress_estat",
+ "regriv_p", "remap", "ren", "rena", "renam", "rename",
+ "renpfix", "repeat", "replace", "report", "reshape",
+ "restore", "ret", "retu", "retur", "return", "rm", "rmdir",
+ "robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf",
+ "rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7",
+ "roctab", "roctab_7", "rolling", "rologit", "rologit_p",
+ "rot", "rota", "rotat", "rotate", "rotatemat", "rreg",
+ "rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7",
+ "rvpplot", "rvpplot_7", "sa", "safesum", "sample",
+ "sampsi", "sav", "save", "savedresults", "saveold", "sc",
+ "sca", "scal", "scala", "scalar", "scatter", "scm_mine",
+ "sco", "scob_lf", "scob_p", "scobi_sw", "scobit", "scor",
+ "score", "scoreplot", "scoreplot_help", "scree", "screeplot",
+ "screeplot_help", "sdtest", "sdtesti", "se", "search",
+ "separate", "seperate", "serrbar", "serrbar_7", "serset", "set",
+ "set_defaults", "sfrancia", "sh", "she", "shel", "shell",
+ "shewhart", "shewhart_7", "signestimationsample", "signrank",
+ "signtest", "simul", "simul_7", "simulate", "simulate_8",
+ "sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth",
+ "snapspan", "so", "sor", "sort", "spearman", "spikeplot",
+ "spikeplot_7", "spikeplt", "spline_x", "split", "sqreg",
+ "sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st",
+ "st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys",
+ "st_note", "st_promo", "st_set", "st_show", "st_smpl",
+ "st_subid", "stack", "statsby", "statsby_8", "stbase", "stci",
+ "stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll",
+ "stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat",
+ "stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise",
+ "stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh",
+ "stphplot", "stphplot_7", "stphtest", "stphtest_7",
+ "stptime", "strate", "strate_7", "streg", "streg_sw", "streset",
+ "sts", "sts_7", "stset", "stsplit", "stsum", "sttocc",
+ "sttoct", "stvary", "stweib", "su", "suest", "suest_8",
+ "sum", "summ", "summa", "summar", "summari", "summariz",
+ "summarize", "sunflower", "sureg", "survcurv", "survsum",
+ "svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg",
+ "svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p",
+ "svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p",
+ "svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p",
+ "svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p",
+ "svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub",
+ "svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes",
+ "svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob",
+ "svyintreg", "svyintreg_7", "svyintrg", "svyivreg", "svylc",
+ "svylog_p", "svylogit", "svymarkout", "svymarkout_8",
+ "svymean", "svymlog", "svymlogit", "svynbreg", "svyolog",
+ "svyologit", "svyoprob", "svyoprobit", "svyopts",
+ "svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt",
+ "svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p",
+ "svyregress", "svyset", "svyset_7", "svyset_8", "svytab",
+ "svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg",
+ "swcox", "swereg", "swilk", "swlogis", "swlogit",
+ "swologit", "swoprbt", "swpois", "swprobit", "swqreg",
+ "swtobit", "swweib", "symmetry", "symmi", "symplot",
+ "symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse",
+ "szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd",
+ "tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds",
+ "tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat",
+ "tabulate", "te", "tempfile", "tempname", "tempvar", "tes",
+ "test", "testnl", "testparm", "teststd", "tetrachoric",
+ "time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p",
+ "tobit_sw", "token", "tokeni", "tokeniz", "tokenize",
+ "tostring", "total", "translate", "translator", "transmap",
+ "treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons",
+ "trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg",
+ "tsappend", "tset", "tsfill", "tsline", "tsline_ex",
+ "tsreport", "tsrevar", "tsrline", "tsset", "tssmooth",
+ "tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial",
+ "tw", "tware_st", "two", "twoway", "twoway__fpfit_serset",
+ "twoway__function_gen", "twoway__histogram_gen",
+ "twoway__ipoint_serset", "twoway__ipoints_serset",
+ "twoway__kdensity_gen", "twoway__lfit_serset",
+ "twoway__normgen_gen", "twoway__pci_serset",
+ "twoway__qfit_serset", "twoway__scatteri_serset",
+ "twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ",
+ "type", "typeof", "u", "unab", "unabbrev", "unabcmd",
+ "update", "us", "use", "uselabel", "var", "var_mkcompanion",
+ "var_p", "varbasic", "varfcast", "vargranger", "varirf",
+ "varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable",
+ "varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase",
+ "varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set",
+ "varirf_table", "varlist", "varlmar", "varnorm", "varsoc",
+ "varstable", "varstable_w", "varstable_w2", "varwle",
+ "vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w",
+ "vecirf_create", "veclmar", "veclmar_w", "vecnorm",
+ "vecnorm_w", "vecrank", "vecstable", "verinst", "vers",
+ "versi", "versio", "version", "view", "viewsource", "vif",
+ "vwls", "wdatetof", "webdescribe", "webseek", "webuse",
+ "weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf",
+ "weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh",
+ "weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa",
+ "weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p",
+ "weibull", "weibull_c", "weibull_s", "weibullhet",
+ "wh", "whelp", "whi", "which", "whil", "while", "wilc_st",
+ "wilcoxon", "win", "wind", "windo", "window", "winexec",
+ "wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7",
+ "xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave",
+ "xmluse", "xpose", "xsh", "xshe", "xshel", "xshell",
+ "xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p",
+ "xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2",
+ "xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr",
+ "xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee",
+ "xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p",
+ "xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman",
+ "xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg",
+ "xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1",
+ "xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit",
+ "xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p",
+ "xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p",
+ "xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p",
+ "xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson",
+ "xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred",
+ "xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p",
+ "xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p",
+ "xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be",
+ "xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re",
+ "xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti",
+ "xtsum", "xttab", "xttest0", "xttobit", "xttobit_8",
+ "xttobit_p", "xttrans", "yx", "yxview__barlike_draw",
+ "yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw",
+ "yxview_dropline_draw", "yxview_function_draw",
+ "yxview_iarrow_draw", "yxview_ilabels_draw",
+ "yxview_normal_draw", "yxview_pcarrow_draw",
+ "yxview_pcbarrow_draw", "yxview_pccapsym_draw",
+ "yxview_pcscatter_draw", "yxview_pcspike_draw",
+ "yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw",
+ "yxview_rcap_draw", "yxview_rcapsym_draw",
+ "yxview_rconnected_draw", "yxview_rline_draw",
+ "yxview_rscatter_draw", "yxview_rspike_draw",
+ "yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb",
+ "zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf",
+ "zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5",
+ "zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5",
+ "ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb",
+ "ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5",
+ "ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5"
+)
+
+
+
+builtins_functions = (
+ "abbrev", "abs", "acos", "acosh", "asin", "asinh", "atan",
+ "atan2", "atanh", "autocode", "betaden", "binomial",
+ "binomialp", "binomialtail", "binormal", "bofd",
+ "byteorder", "c", "_caller", "cauchy", "cauchyden",
+ "cauchytail", "Cdhms", "ceil", "char", "chi2", "chi2den",
+ "chi2tail", "Chms", "chop", "cholesky", "clip", "Clock",
+ "clock", "cloglog", "Cmdyhms", "Cofc", "cofC", "Cofd", "cofd",
+ "coleqnumb", "collatorlocale", "collatorversion",
+ "colnfreeparms", "colnumb", "colsof", "comb", "cond", "corr",
+ "cos", "cosh", "daily", "date", "day", "det", "dgammapda",
+ "dgammapdada", "dgammapdadx", "dgammapdx", "dgammapdxdx",
+ "dhms", "diag", "diag0cnt", "digamma", "dofb", "dofC", "dofc",
+ "dofh", "dofm", "dofq", "dofw", "dofy", "dow", "doy",
+ "dunnettprob", "e", "el", "esample", "epsdouble", "epsfloat",
+ "exp", "expm1", "exponential", "exponentialden",
+ "exponentialtail", "F", "Fden", "fileexists", "fileread",
+ "filereaderror", "filewrite", "float", "floor", "fmtwidth",
+ "frval", "_frval", "Ftail", "gammaden", "gammap", "gammaptail",
+ "get", "hadamard", "halfyear", "halfyearly", "has_eprop", "hh",
+ "hhC", "hms", "hofd", "hours", "hypergeometric",
+ "hypergeometricp", "I", "ibeta", "ibetatail", "igaussian",
+ "igaussianden", "igaussiantail", "indexnot", "inlist",
+ "inrange", "int", "inv", "invbinomial", "invbinomialtail",
+ "invcauchy", "invcauchytail", "invchi2", "invchi2tail",
+ "invcloglog", "invdunnettprob", "invexponential",
+ "invexponentialtail", "invF", "invFtail", "invgammap",
+ "invgammaptail", "invibeta", "invibetatail", "invigaussian",
+ "invigaussiantail", "invlaplace", "invlaplacetail",
+ "invlogisticp", "invlogisticsp", "invlogisticmsp",
+ "invlogistictailp", "invlogistictailsp", "invlogistictailmsp",
+ "invlogit", "invnbinomial", "invnbinomialtail", "invnchi2",
+ "invnchi2tail", "invnF", "invnFtail", "invnibeta",
+ "invnormal", "invnt", "invnttail", "invpoisson",
+ "invpoissontail", "invsym", "invt", "invttail", "invtukeyprob",
+ "invweibullabp", "invweibullabgp", "invweibullphabp",
+ "invweibullphabgp", "invweibullphtailabp",
+ "invweibullphtailabgp", "invweibulltailabp",
+ "invweibulltailabgp", "irecode", "issymmetric", "J", "laplace",
+ "laplaceden", "laplacetail", "ln", "ln1m", "ln1p", "lncauchyden",
+ "lnfactorial", "lngamma", "lnigammaden", "lnigaussianden",
+ "lniwishartden", "lnlaplaceden", "lnmvnormalden", "lnnormal",
+ "lnnormalden", "lnnormaldenxs", "lnnormaldenxms", "lnwishartden",
+ "log", "log10", "log1m", "log1p", "logisticx", "logisticsx",
+ "logisticmsx", "logisticdenx", "logisticdensx", "logisticdenmsx",
+ "logistictailx", "logistictailsx", "logistictailmsx", "logit",
+ "matmissing", "matrix", "matuniform", "max", "maxbyte",
+ "maxdouble", "maxfloat", "maxint", "maxlong", "mdy", "mdyhms",
+ "mi", "min", "minbyte", "mindouble", "minfloat", "minint",
+ "minlong", "minutes", "missing", "mm", "mmC", "mod", "mofd",
+ "month", "monthly", "mreldif", "msofhours", "msofminutes",
+ "msofseconds", "nbetaden", "nbinomial", "nbinomialp",
+ "nbinomialtail", "nchi2", "nchi2den", "nchi2tail", "nF",
+ "nFden", "nFtail", "nibeta", "normal", "normalden",
+ "normaldenxs", "normaldenxms", "npnchi2", "npnF", "npnt",
+ "nt", "ntden", "nttail", "nullmat", "plural", "plurals1",
+ "poisson", "poissonp", "poissontail", "qofd", "quarter",
+ "quarterly", "r", "rbeta", "rbinomial", "rcauchy", "rchi2",
+ "recode", "real", "regexm", "regexr", "regexs", "reldif",
+ "replay", "return", "rexponential", "rgamma", "rhypergeometric",
+ "rigaussian", "rlaplace", "rlogistic", "rlogistics",
+ "rlogisticms", "rnbinomial", "rnormal", "rnormalm", "rnormalms",
+ "round", "roweqnumb", "rownfreeparms", "rownumb", "rowsof",
+ "rpoisson", "rt", "runiform", "runiformab", "runiformint",
+ "rweibullab", "rweibullabg", "rweibullphab", "rweibullphabg",
+ "s", "scalar", "seconds", "sign", "sin", "sinh",
+ "smallestdouble", "soundex", "soundex_nara", "sqrt", "ss",
+ "ssC", "strcat", "strdup", "string", "stringns", "stritrim",
+ "strlen", "strlower", "strltrim", "strmatch", "strofreal",
+ "strofrealns", "strpos", "strproper", "strreverse", "strrpos",
+ "strrtrim", "strtoname", "strtrim", "strupper", "subinstr",
+ "subinword", "substr", "sum", "sweep", "t", "tan", "tanh",
+ "tC", "tc", "td", "tden", "th", "tin", "tm", "tobytes", "tq",
+ "trace", "trigamma", "trunc", "ttail", "tukeyprob", "tw",
+ "twithin", "uchar", "udstrlen", "udsubstr", "uisdigit",
+ "uisletter", "ustrcompare", "ustrfix", "ustrfrom",
+ "ustrinvalidcnt", "ustrleft", "ustrlen", "ustrlower",
+ "ustrltrim", "ustrnormalize", "ustrpos", "ustrregexm",
+ "ustrregexra", "ustrregexrf", "ustrregexs", "ustrreverse",
+ "ustrright", "ustrrpos", "ustrrtrim", "ustrsortkey",
+ "ustrtitle", "ustrto", "ustrtohex", "ustrtoname",
+ "ustrtrim", "ustrunescape", "ustrupper", "ustrword",
+ "ustrwordcount", "usubinstr", "usubstr", "vec", "vecdiag",
+ "week", "weekly", "weibullabx", "weibullabgx", "weibulldenabx",
+ "weibulldenabgx", "weibullphabx", "weibullphabgx",
+ "weibullphdenabx", "weibullphdenabgx", "weibullphtailabx",
+ "weibullphtailabgx", "weibulltailabx", "weibulltailabgx",
+ "wofd", "word", "wordbreaklocale", "wordcount",
+ "year", "yearly", "yh", "ym", "yofd", "yq", "yw"
+)
diff --git a/pygments/lexers/_tsql_builtins.py b/pygments/lexers/_tsql_builtins.py
new file mode 100644
index 0000000..934e54a
--- /dev/null
+++ b/pygments/lexers/_tsql_builtins.py
@@ -0,0 +1,1003 @@
+"""
+ pygments.lexers._tsql_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ These are manually translated lists from https://msdn.microsoft.com.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# See https://msdn.microsoft.com/en-us/library/ms174986.aspx.
+OPERATORS = (
+ '!<',
+ '!=',
+ '!>',
+ '<',
+ '<=',
+ '<>',
+ '=',
+ '>',
+ '>=',
+ '+',
+ '+=',
+ '-',
+ '-=',
+ '*',
+ '*=',
+ '/',
+ '/=',
+ '%',
+ '%=',
+ '&',
+ '&=',
+ '|',
+ '|=',
+ '^',
+ '^=',
+ '~',
+ '::',
+)
+
+OPERATOR_WORDS = (
+ 'all',
+ 'and',
+ 'any',
+ 'between',
+ 'except',
+ 'exists',
+ 'in',
+ 'intersect',
+ 'like',
+ 'not',
+ 'or',
+ 'some',
+ 'union',
+)
+
+_KEYWORDS_SERVER = (
+ 'add',
+ 'all',
+ 'alter',
+ 'and',
+ 'any',
+ 'as',
+ 'asc',
+ 'authorization',
+ 'backup',
+ 'begin',
+ 'between',
+ 'break',
+ 'browse',
+ 'bulk',
+ 'by',
+ 'cascade',
+ 'case',
+ 'catch',
+ 'check',
+ 'checkpoint',
+ 'close',
+ 'clustered',
+ 'coalesce',
+ 'collate',
+ 'column',
+ 'commit',
+ 'compute',
+ 'constraint',
+ 'contains',
+ 'containstable',
+ 'continue',
+ 'convert',
+ 'create',
+ 'cross',
+ 'current',
+ 'current_date',
+ 'current_time',
+ 'current_timestamp',
+ 'current_user',
+ 'cursor',
+ 'database',
+ 'dbcc',
+ 'deallocate',
+ 'declare',
+ 'default',
+ 'delete',
+ 'deny',
+ 'desc',
+ 'disk',
+ 'distinct',
+ 'distributed',
+ 'double',
+ 'drop',
+ 'dump',
+ 'else',
+ 'end',
+ 'errlvl',
+ 'escape',
+ 'except',
+ 'exec',
+ 'execute',
+ 'exists',
+ 'exit',
+ 'external',
+ 'fetch',
+ 'file',
+ 'fillfactor',
+ 'for',
+ 'foreign',
+ 'freetext',
+ 'freetexttable',
+ 'from',
+ 'full',
+ 'function',
+ 'goto',
+ 'grant',
+ 'group',
+ 'having',
+ 'holdlock',
+ 'identity',
+ 'identity_insert',
+ 'identitycol',
+ 'if',
+ 'in',
+ 'index',
+ 'inner',
+ 'insert',
+ 'intersect',
+ 'into',
+ 'is',
+ 'join',
+ 'key',
+ 'kill',
+ 'left',
+ 'like',
+ 'lineno',
+ 'load',
+ 'merge',
+ 'national',
+ 'nocheck',
+ 'nonclustered',
+ 'not',
+ 'null',
+ 'nullif',
+ 'of',
+ 'off',
+ 'offsets',
+ 'on',
+ 'open',
+ 'opendatasource',
+ 'openquery',
+ 'openrowset',
+ 'openxml',
+ 'option',
+ 'or',
+ 'order',
+ 'outer',
+ 'over',
+ 'percent',
+ 'pivot',
+ 'plan',
+ 'precision',
+ 'primary',
+ 'print',
+ 'proc',
+ 'procedure',
+ 'public',
+ 'raiserror',
+ 'read',
+ 'readtext',
+ 'reconfigure',
+ 'references',
+ 'replication',
+ 'restore',
+ 'restrict',
+ 'return',
+ 'revert',
+ 'revoke',
+ 'right',
+ 'rollback',
+ 'rowcount',
+ 'rowguidcol',
+ 'rule',
+ 'save',
+ 'schema',
+ 'securityaudit',
+ 'select',
+ 'semantickeyphrasetable',
+ 'semanticsimilaritydetailstable',
+ 'semanticsimilaritytable',
+ 'session_user',
+ 'set',
+ 'setuser',
+ 'shutdown',
+ 'some',
+ 'statistics',
+ 'system_user',
+ 'table',
+ 'tablesample',
+ 'textsize',
+ 'then',
+ 'throw',
+ 'to',
+ 'top',
+ 'tran',
+ 'transaction',
+ 'trigger',
+ 'truncate',
+ 'try',
+ 'try_convert',
+ 'tsequal',
+ 'union',
+ 'unique',
+ 'unpivot',
+ 'update',
+ 'updatetext',
+ 'use',
+ 'user',
+ 'values',
+ 'varying',
+ 'view',
+ 'waitfor',
+ 'when',
+ 'where',
+ 'while',
+ 'with',
+ 'within',
+ 'writetext',
+)
+
+_KEYWORDS_FUTURE = (
+ 'absolute',
+ 'action',
+ 'admin',
+ 'after',
+ 'aggregate',
+ 'alias',
+ 'allocate',
+ 'are',
+ 'array',
+ 'asensitive',
+ 'assertion',
+ 'asymmetric',
+ 'at',
+ 'atomic',
+ 'before',
+ 'binary',
+ 'bit',
+ 'blob',
+ 'boolean',
+ 'both',
+ 'breadth',
+ 'call',
+ 'called',
+ 'cardinality',
+ 'cascaded',
+ 'cast',
+ 'catalog',
+ 'char',
+ 'character',
+ 'class',
+ 'clob',
+ 'collation',
+ 'collect',
+ 'completion',
+ 'condition',
+ 'connect',
+ 'connection',
+ 'constraints',
+ 'constructor',
+ 'corr',
+ 'corresponding',
+ 'covar_pop',
+ 'covar_samp',
+ 'cube',
+ 'cume_dist',
+ 'current_catalog',
+ 'current_default_transform_group',
+ 'current_path',
+ 'current_role',
+ 'current_schema',
+ 'current_transform_group_for_type',
+ 'cycle',
+ 'data',
+ 'date',
+ 'day',
+ 'dec',
+ 'decimal',
+ 'deferrable',
+ 'deferred',
+ 'depth',
+ 'deref',
+ 'describe',
+ 'descriptor',
+ 'destroy',
+ 'destructor',
+ 'deterministic',
+ 'diagnostics',
+ 'dictionary',
+ 'disconnect',
+ 'domain',
+ 'dynamic',
+ 'each',
+ 'element',
+ 'end-exec',
+ 'equals',
+ 'every',
+ 'exception',
+ 'false',
+ 'filter',
+ 'first',
+ 'float',
+ 'found',
+ 'free',
+ 'fulltexttable',
+ 'fusion',
+ 'general',
+ 'get',
+ 'global',
+ 'go',
+ 'grouping',
+ 'hold',
+ 'host',
+ 'hour',
+ 'ignore',
+ 'immediate',
+ 'indicator',
+ 'initialize',
+ 'initially',
+ 'inout',
+ 'input',
+ 'int',
+ 'integer',
+ 'intersection',
+ 'interval',
+ 'isolation',
+ 'iterate',
+ 'language',
+ 'large',
+ 'last',
+ 'lateral',
+ 'leading',
+ 'less',
+ 'level',
+ 'like_regex',
+ 'limit',
+ 'ln',
+ 'local',
+ 'localtime',
+ 'localtimestamp',
+ 'locator',
+ 'map',
+ 'match',
+ 'member',
+ 'method',
+ 'minute',
+ 'mod',
+ 'modifies',
+ 'modify',
+ 'module',
+ 'month',
+ 'multiset',
+ 'names',
+ 'natural',
+ 'nchar',
+ 'nclob',
+ 'new',
+ 'next',
+ 'no',
+ 'none',
+ 'normalize',
+ 'numeric',
+ 'object',
+ 'occurrences_regex',
+ 'old',
+ 'only',
+ 'operation',
+ 'ordinality',
+ 'out',
+ 'output',
+ 'overlay',
+ 'pad',
+ 'parameter',
+ 'parameters',
+ 'partial',
+ 'partition',
+ 'path',
+ 'percent_rank',
+ 'percentile_cont',
+ 'percentile_disc',
+ 'position_regex',
+ 'postfix',
+ 'prefix',
+ 'preorder',
+ 'prepare',
+ 'preserve',
+ 'prior',
+ 'privileges',
+ 'range',
+ 'reads',
+ 'real',
+ 'recursive',
+ 'ref',
+ 'referencing',
+ 'regr_avgx',
+ 'regr_avgy',
+ 'regr_count',
+ 'regr_intercept',
+ 'regr_r2',
+ 'regr_slope',
+ 'regr_sxx',
+ 'regr_sxy',
+ 'regr_syy',
+ 'relative',
+ 'release',
+ 'result',
+ 'returns',
+ 'role',
+ 'rollup',
+ 'routine',
+ 'row',
+ 'rows',
+ 'savepoint',
+ 'scope',
+ 'scroll',
+ 'search',
+ 'second',
+ 'section',
+ 'sensitive',
+ 'sequence',
+ 'session',
+ 'sets',
+ 'similar',
+ 'size',
+ 'smallint',
+ 'space',
+ 'specific',
+ 'specifictype',
+ 'sql',
+ 'sqlexception',
+ 'sqlstate',
+ 'sqlwarning',
+ 'start',
+ 'state',
+ 'statement',
+ 'static',
+ 'stddev_pop',
+ 'stddev_samp',
+ 'structure',
+ 'submultiset',
+ 'substring_regex',
+ 'symmetric',
+ 'system',
+ 'temporary',
+ 'terminate',
+ 'than',
+ 'time',
+ 'timestamp',
+ 'timezone_hour',
+ 'timezone_minute',
+ 'trailing',
+ 'translate_regex',
+ 'translation',
+ 'treat',
+ 'true',
+ 'uescape',
+ 'under',
+ 'unknown',
+ 'unnest',
+ 'usage',
+ 'using',
+ 'value',
+ 'var_pop',
+ 'var_samp',
+ 'varchar',
+ 'variable',
+ 'whenever',
+ 'width_bucket',
+ 'window',
+ 'within',
+ 'without',
+ 'work',
+ 'write',
+ 'xmlagg',
+ 'xmlattributes',
+ 'xmlbinary',
+ 'xmlcast',
+ 'xmlcomment',
+ 'xmlconcat',
+ 'xmldocument',
+ 'xmlelement',
+ 'xmlexists',
+ 'xmlforest',
+ 'xmliterate',
+ 'xmlnamespaces',
+ 'xmlparse',
+ 'xmlpi',
+ 'xmlquery',
+ 'xmlserialize',
+ 'xmltable',
+ 'xmltext',
+ 'xmlvalidate',
+ 'year',
+ 'zone',
+)
+
+_KEYWORDS_ODBC = (
+ 'absolute',
+ 'action',
+ 'ada',
+ 'add',
+ 'all',
+ 'allocate',
+ 'alter',
+ 'and',
+ 'any',
+ 'are',
+ 'as',
+ 'asc',
+ 'assertion',
+ 'at',
+ 'authorization',
+ 'avg',
+ 'begin',
+ 'between',
+ 'bit',
+ 'bit_length',
+ 'both',
+ 'by',
+ 'cascade',
+ 'cascaded',
+ 'case',
+ 'cast',
+ 'catalog',
+ 'char',
+ 'char_length',
+ 'character',
+ 'character_length',
+ 'check',
+ 'close',
+ 'coalesce',
+ 'collate',
+ 'collation',
+ 'column',
+ 'commit',
+ 'connect',
+ 'connection',
+ 'constraint',
+ 'constraints',
+ 'continue',
+ 'convert',
+ 'corresponding',
+ 'count',
+ 'create',
+ 'cross',
+ 'current',
+ 'current_date',
+ 'current_time',
+ 'current_timestamp',
+ 'current_user',
+ 'cursor',
+ 'date',
+ 'day',
+ 'deallocate',
+ 'dec',
+ 'decimal',
+ 'declare',
+ 'default',
+ 'deferrable',
+ 'deferred',
+ 'delete',
+ 'desc',
+ 'describe',
+ 'descriptor',
+ 'diagnostics',
+ 'disconnect',
+ 'distinct',
+ 'domain',
+ 'double',
+ 'drop',
+ 'else',
+ 'end',
+ 'end-exec',
+ 'escape',
+ 'except',
+ 'exception',
+ 'exec',
+ 'execute',
+ 'exists',
+ 'external',
+ 'extract',
+ 'false',
+ 'fetch',
+ 'first',
+ 'float',
+ 'for',
+ 'foreign',
+ 'fortran',
+ 'found',
+ 'from',
+ 'full',
+ 'get',
+ 'global',
+ 'go',
+ 'goto',
+ 'grant',
+ 'group',
+ 'having',
+ 'hour',
+ 'identity',
+ 'immediate',
+ 'in',
+ 'include',
+ 'index',
+ 'indicator',
+ 'initially',
+ 'inner',
+ 'input',
+ 'insensitive',
+ 'insert',
+ 'int',
+ 'integer',
+ 'intersect',
+ 'interval',
+ 'into',
+ 'is',
+ 'isolation',
+ 'join',
+ 'key',
+ 'language',
+ 'last',
+ 'leading',
+ 'left',
+ 'level',
+ 'like',
+ 'local',
+ 'lower',
+ 'match',
+ 'max',
+ 'min',
+ 'minute',
+ 'module',
+ 'month',
+ 'names',
+ 'national',
+ 'natural',
+ 'nchar',
+ 'next',
+ 'no',
+ 'none',
+ 'not',
+ 'null',
+ 'nullif',
+ 'numeric',
+ 'octet_length',
+ 'of',
+ 'on',
+ 'only',
+ 'open',
+ 'option',
+ 'or',
+ 'order',
+ 'outer',
+ 'output',
+ 'overlaps',
+ 'pad',
+ 'partial',
+ 'pascal',
+ 'position',
+ 'precision',
+ 'prepare',
+ 'preserve',
+ 'primary',
+ 'prior',
+ 'privileges',
+ 'procedure',
+ 'public',
+ 'read',
+ 'real',
+ 'references',
+ 'relative',
+ 'restrict',
+ 'revoke',
+ 'right',
+ 'rollback',
+ 'rows',
+ 'schema',
+ 'scroll',
+ 'second',
+ 'section',
+ 'select',
+ 'session',
+ 'session_user',
+ 'set',
+ 'size',
+ 'smallint',
+ 'some',
+ 'space',
+ 'sql',
+ 'sqlca',
+ 'sqlcode',
+ 'sqlerror',
+ 'sqlstate',
+ 'sqlwarning',
+ 'substring',
+ 'sum',
+ 'system_user',
+ 'table',
+ 'temporary',
+ 'then',
+ 'time',
+ 'timestamp',
+ 'timezone_hour',
+ 'timezone_minute',
+ 'to',
+ 'trailing',
+ 'transaction',
+ 'translate',
+ 'translation',
+ 'trim',
+ 'true',
+ 'union',
+ 'unique',
+ 'unknown',
+ 'update',
+ 'upper',
+ 'usage',
+ 'user',
+ 'using',
+ 'value',
+ 'values',
+ 'varchar',
+ 'varying',
+ 'view',
+ 'when',
+ 'whenever',
+ 'where',
+ 'with',
+ 'work',
+ 'write',
+ 'year',
+ 'zone',
+)
+
+# See https://msdn.microsoft.com/en-us/library/ms189822.aspx.
+KEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER))
+
+# See https://msdn.microsoft.com/en-us/library/ms187752.aspx.
+TYPES = (
+ 'bigint',
+ 'binary',
+ 'bit',
+ 'char',
+ 'cursor',
+ 'date',
+ 'datetime',
+ 'datetime2',
+ 'datetimeoffset',
+ 'decimal',
+ 'float',
+ 'hierarchyid',
+ 'image',
+ 'int',
+ 'money',
+ 'nchar',
+ 'ntext',
+ 'numeric',
+ 'nvarchar',
+ 'real',
+ 'smalldatetime',
+ 'smallint',
+ 'smallmoney',
+ 'sql_variant',
+ 'table',
+ 'text',
+ 'time',
+ 'timestamp',
+ 'tinyint',
+ 'uniqueidentifier',
+ 'varbinary',
+ 'varchar',
+ 'xml',
+)
+
+# See https://msdn.microsoft.com/en-us/library/ms174318.aspx.
+FUNCTIONS = (
+ '$partition',
+ 'abs',
+ 'acos',
+ 'app_name',
+ 'applock_mode',
+ 'applock_test',
+ 'ascii',
+ 'asin',
+ 'assemblyproperty',
+ 'atan',
+ 'atn2',
+ 'avg',
+ 'binary_checksum',
+ 'cast',
+ 'ceiling',
+ 'certencoded',
+ 'certprivatekey',
+ 'char',
+ 'charindex',
+ 'checksum',
+ 'checksum_agg',
+ 'choose',
+ 'col_length',
+ 'col_name',
+ 'columnproperty',
+ 'compress',
+ 'concat',
+ 'connectionproperty',
+ 'context_info',
+ 'convert',
+ 'cos',
+ 'cot',
+ 'count',
+ 'count_big',
+ 'current_request_id',
+ 'current_timestamp',
+ 'current_transaction_id',
+ 'current_user',
+ 'cursor_status',
+ 'database_principal_id',
+ 'databasepropertyex',
+ 'dateadd',
+ 'datediff',
+ 'datediff_big',
+ 'datefromparts',
+ 'datename',
+ 'datepart',
+ 'datetime2fromparts',
+ 'datetimefromparts',
+ 'datetimeoffsetfromparts',
+ 'day',
+ 'db_id',
+ 'db_name',
+ 'decompress',
+ 'degrees',
+ 'dense_rank',
+ 'difference',
+ 'eomonth',
+ 'error_line',
+ 'error_message',
+ 'error_number',
+ 'error_procedure',
+ 'error_severity',
+ 'error_state',
+ 'exp',
+ 'file_id',
+ 'file_idex',
+ 'file_name',
+ 'filegroup_id',
+ 'filegroup_name',
+ 'filegroupproperty',
+ 'fileproperty',
+ 'floor',
+ 'format',
+ 'formatmessage',
+ 'fulltextcatalogproperty',
+ 'fulltextserviceproperty',
+ 'get_filestream_transaction_context',
+ 'getansinull',
+ 'getdate',
+ 'getutcdate',
+ 'grouping',
+ 'grouping_id',
+ 'has_perms_by_name',
+ 'host_id',
+ 'host_name',
+ 'iif',
+ 'index_col',
+ 'indexkey_property',
+ 'indexproperty',
+ 'is_member',
+ 'is_rolemember',
+ 'is_srvrolemember',
+ 'isdate',
+ 'isjson',
+ 'isnull',
+ 'isnumeric',
+ 'json_modify',
+ 'json_query',
+ 'json_value',
+ 'left',
+ 'len',
+ 'log',
+ 'log10',
+ 'lower',
+ 'ltrim',
+ 'max',
+ 'min',
+ 'min_active_rowversion',
+ 'month',
+ 'nchar',
+ 'newid',
+ 'newsequentialid',
+ 'ntile',
+ 'object_definition',
+ 'object_id',
+ 'object_name',
+ 'object_schema_name',
+ 'objectproperty',
+ 'objectpropertyex',
+ 'opendatasource',
+ 'openjson',
+ 'openquery',
+ 'openrowset',
+ 'openxml',
+ 'original_db_name',
+ 'original_login',
+ 'parse',
+ 'parsename',
+ 'patindex',
+ 'permissions',
+ 'pi',
+ 'power',
+ 'pwdcompare',
+ 'pwdencrypt',
+ 'quotename',
+ 'radians',
+ 'rand',
+ 'rank',
+ 'replace',
+ 'replicate',
+ 'reverse',
+ 'right',
+ 'round',
+ 'row_number',
+ 'rowcount_big',
+ 'rtrim',
+ 'schema_id',
+ 'schema_name',
+ 'scope_identity',
+ 'serverproperty',
+ 'session_context',
+ 'session_user',
+ 'sign',
+ 'sin',
+ 'smalldatetimefromparts',
+ 'soundex',
+ 'sp_helplanguage',
+ 'space',
+ 'sqrt',
+ 'square',
+ 'stats_date',
+ 'stdev',
+ 'stdevp',
+ 'str',
+ 'string_escape',
+ 'string_split',
+ 'stuff',
+ 'substring',
+ 'sum',
+ 'suser_id',
+ 'suser_name',
+ 'suser_sid',
+ 'suser_sname',
+ 'switchoffset',
+ 'sysdatetime',
+ 'sysdatetimeoffset',
+ 'system_user',
+ 'sysutcdatetime',
+ 'tan',
+ 'textptr',
+ 'textvalid',
+ 'timefromparts',
+ 'todatetimeoffset',
+ 'try_cast',
+ 'try_convert',
+ 'try_parse',
+ 'type_id',
+ 'type_name',
+ 'typeproperty',
+ 'unicode',
+ 'upper',
+ 'user_id',
+ 'user_name',
+ 'var',
+ 'varp',
+ 'xact_state',
+ 'year',
+)
diff --git a/pygments/lexers/_usd_builtins.py b/pygments/lexers/_usd_builtins.py
new file mode 100644
index 0000000..de77341
--- /dev/null
+++ b/pygments/lexers/_usd_builtins.py
@@ -0,0 +1,112 @@
+"""
+ pygments.lexers._usd_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A collection of known USD-related keywords, attributes, and types.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+COMMON_ATTRIBUTES = [  # attribute names the USD lexer treats as built-in/common
+    "extent",
+    "xformOpOrder",
+]
+
+KEYWORDS = [
+ "class",
+ "clips",
+ "custom",
+ "customData",
+ "def",
+ "dictionary",
+ "inherits",
+ "over",
+ "payload",
+ "references",
+ "rel",
+ "subLayers",
+ "timeSamples",
+ "uniform",
+ "variantSet",
+ "variantSets",
+ "variants",
+]
+
+OPERATORS = [  # list-edit keywords highlighted as operators (e.g. `prepend references`)
+    "add",
+    "append",
+    "delete",
+    "prepend",
+    "reorder",
+]
+
+SPECIAL_NAMES = [
+ "active",
+ "apiSchemas",
+ "defaultPrim",
+ "elementSize",
+ "endTimeCode",
+ "hidden",
+ "instanceable",
+ "interpolation",
+ "kind",
+ "startTimeCode",
+ "upAxis",
+]
+
+TYPES = [
+ "asset",
+ "bool",
+ "color3d",
+ "color3f",
+ "color3h",
+ "color4d",
+ "color4f",
+ "color4h",
+ "double",
+ "double2",
+ "double3",
+ "double4",
+ "float",
+ "float2",
+ "float3",
+ "float4",
+ "frame4d",
+ "half",
+ "half2",
+ "half3",
+ "half4",
+ "int",
+ "int2",
+ "int3",
+ "int4",
+ "keyword",
+ "matrix2d",
+ "matrix3d",
+ "matrix4d",
+ "normal3d",
+ "normal3f",
+ "normal3h",
+ "point3d",
+ "point3f",
+ "point3h",
+ "quatd",
+ "quatf",
+ "quath",
+ "string",
+ "syn",
+ "token",
+ "uchar",
+ "uchar2",
+ "uchar3",
+ "uchar4",
+ "uint",
+ "uint2",
+ "uint3",
+ "uint4",
+ "usdaType",
+ "vector3d",
+ "vector3f",
+ "vector3h",
+]
diff --git a/pygments/lexers/_vbscript_builtins.py b/pygments/lexers/_vbscript_builtins.py
new file mode 100644
index 0000000..68e732e
--- /dev/null
+++ b/pygments/lexers/_vbscript_builtins.py
@@ -0,0 +1,279 @@
+"""
+ pygments.lexers._vbscript_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ These are manually translated lists from
+ http://www.indusoft.com/pdf/VBScript%20Reference.pdf.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+KEYWORDS = [
+ 'ByRef',
+ 'ByVal',
+ # dim: special rule
+ 'call',
+ 'case',
+ 'class',
+ # const: special rule
+ 'do',
+ 'each',
+ 'else',
+ 'elseif',
+ 'end',
+ 'erase',
+ 'execute',
+ 'function',
+ 'exit',
+ 'for',
+    # (duplicate removed: 'function' is already listed above)
+ 'GetRef',
+ 'global',
+ 'if',
+ 'let',
+ 'loop',
+ 'next',
+ 'new',
+ # option: special rule
+ 'private',
+ 'public',
+ 'redim',
+ 'select',
+ 'set',
+ 'sub',
+ 'then',
+ 'wend',
+ 'while',
+ 'with',
+]
+
+BUILTIN_FUNCTIONS = [
+ 'Abs',
+ 'Array',
+ 'Asc',
+ 'Atn',
+ 'CBool',
+ 'CByte',
+ 'CCur',
+ 'CDate',
+ 'CDbl',
+ 'Chr',
+ 'CInt',
+ 'CLng',
+ 'Cos',
+ 'CreateObject',
+ 'CSng',
+ 'CStr',
+ 'Date',
+ 'DateAdd',
+ 'DateDiff',
+ 'DatePart',
+ 'DateSerial',
+ 'DateValue',
+ 'Day',
+ 'Eval',
+ 'Exp',
+ 'Filter',
+ 'Fix',
+ 'FormatCurrency',
+ 'FormatDateTime',
+ 'FormatNumber',
+ 'FormatPercent',
+ 'GetObject',
+ 'GetLocale',
+ 'Hex',
+ 'Hour',
+ 'InStr',
+ 'inStrRev',
+ 'Int',
+ 'IsArray',
+ 'IsDate',
+ 'IsEmpty',
+ 'IsNull',
+ 'IsNumeric',
+ 'IsObject',
+ 'Join',
+ 'LBound',
+ 'LCase',
+ 'Left',
+ 'Len',
+ 'LoadPicture',
+ 'Log',
+ 'LTrim',
+ 'Mid',
+ 'Minute',
+ 'Month',
+ 'MonthName',
+ 'MsgBox',
+ 'Now',
+ 'Oct',
+ 'Randomize',
+ 'RegExp',
+ 'Replace',
+ 'RGB',
+ 'Right',
+ 'Rnd',
+ 'Round',
+ 'RTrim',
+ 'ScriptEngine',
+ 'ScriptEngineBuildVersion',
+ 'ScriptEngineMajorVersion',
+ 'ScriptEngineMinorVersion',
+ 'Second',
+ 'SetLocale',
+ 'Sgn',
+ 'Space',
+ 'Split',
+ 'Sqr',
+ 'StrComp',
+ 'String',
+ 'StrReverse',
+ 'Tan',
+ 'Time',
+ 'Timer',
+ 'TimeSerial',
+ 'TimeValue',
+ 'Trim',
+ 'TypeName',
+ 'UBound',
+ 'UCase',
+ 'VarType',
+ 'Weekday',
+ 'WeekdayName',
+ 'Year',
+]
+
+BUILTIN_VARIABLES = [
+ 'Debug',
+ 'Dictionary',
+ 'Drive',
+ 'Drives',
+ 'Err',
+ 'File',
+ 'Files',
+ 'FileSystemObject',
+ 'Folder',
+ 'Folders',
+ 'Match',
+ 'Matches',
+ 'RegExp',
+ 'Submatches',
+ 'TextStream',
+]
+
+OPERATORS = [
+ '+',
+ '-',
+ '*',
+ '/',
+ '\\',
+ '^',
+ '|',
+ '<',
+ '<=',
+ '>',
+ '>=',
+ '=',
+ '<>',
+ '&',
+ '$',
+]
+
+OPERATOR_WORDS = [  # word-form VBScript operators (logical/arithmetic), matched case-insensitively
+    'mod',
+    'and',
+    'or',
+    'xor',
+    'eqv',
+    'imp',
+    'is',
+    'not',
+]
+
+BUILTIN_CONSTANTS = [
+ 'False',
+ 'True',
+ 'vbAbort',
+ 'vbAbortRetryIgnore',
+ 'vbApplicationModal',
+ 'vbArray',
+ 'vbBinaryCompare',
+ 'vbBlack',
+ 'vbBlue',
+ 'vbBoole',
+ 'vbByte',
+ 'vbCancel',
+ 'vbCr',
+ 'vbCritical',
+ 'vbCrLf',
+ 'vbCurrency',
+ 'vbCyan',
+ 'vbDataObject',
+ 'vbDate',
+ 'vbDefaultButton1',
+ 'vbDefaultButton2',
+ 'vbDefaultButton3',
+ 'vbDefaultButton4',
+ 'vbDouble',
+ 'vbEmpty',
+ 'vbError',
+ 'vbExclamation',
+ 'vbFalse',
+ 'vbFirstFullWeek',
+ 'vbFirstJan1',
+ 'vbFormFeed',
+ 'vbFriday',
+ 'vbGeneralDate',
+ 'vbGreen',
+ 'vbIgnore',
+ 'vbInformation',
+ 'vbInteger',
+ 'vbLf',
+ 'vbLong',
+ 'vbLongDate',
+ 'vbLongTime',
+ 'vbMagenta',
+ 'vbMonday',
+ 'vbMsgBoxHelpButton',
+ 'vbMsgBoxRight',
+ 'vbMsgBoxRtlReading',
+ 'vbMsgBoxSetForeground',
+ 'vbNewLine',
+ 'vbNo',
+ 'vbNull',
+ 'vbNullChar',
+ 'vbNullString',
+ 'vbObject',
+ 'vbObjectError',
+ 'vbOK',
+ 'vbOKCancel',
+ 'vbOKOnly',
+ 'vbQuestion',
+ 'vbRed',
+ 'vbRetry',
+ 'vbRetryCancel',
+ 'vbSaturday',
+ 'vbShortDate',
+ 'vbShortTime',
+ 'vbSingle',
+ 'vbString',
+ 'vbSunday',
+ 'vbSystemModal',
+ 'vbTab',
+ 'vbTextCompare',
+ 'vbThursday',
+ 'vbTrue',
+ 'vbTuesday',
+ 'vbUseDefault',
+ 'vbUseSystem',
+    # (duplicate removed: 'vbUseSystem' is already listed above)
+ 'vbVariant',
+ 'vbVerticalTab',
+ 'vbWednesday',
+ 'vbWhite',
+ 'vbYellow',
+ 'vbYes',
+ 'vbYesNo',
+ 'vbYesNoCancel',
+]
diff --git a/pygments/lexers/_vim_builtins.py b/pygments/lexers/_vim_builtins.py
new file mode 100644
index 0000000..12eac49
--- /dev/null
+++ b/pygments/lexers/_vim_builtins.py
@@ -0,0 +1,1938 @@
+"""
+ pygments.lexers._vim_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file is autogenerated by scripts/get_vimkw.py
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Split up in multiple functions so it's importable by jython, which has a
+# per-method size limit.
+
+def _getauto():
+ var = (
+ ('BufAdd','BufAdd'),
+ ('BufCreate','BufCreate'),
+ ('BufDelete','BufDelete'),
+ ('BufEnter','BufEnter'),
+ ('BufFilePost','BufFilePost'),
+ ('BufFilePre','BufFilePre'),
+ ('BufHidden','BufHidden'),
+ ('BufLeave','BufLeave'),
+ ('BufNew','BufNew'),
+ ('BufNewFile','BufNewFile'),
+ ('BufRead','BufRead'),
+ ('BufReadCmd','BufReadCmd'),
+ ('BufReadPost','BufReadPost'),
+ ('BufReadPre','BufReadPre'),
+ ('BufUnload','BufUnload'),
+ ('BufWinEnter','BufWinEnter'),
+ ('BufWinLeave','BufWinLeave'),
+ ('BufWipeout','BufWipeout'),
+ ('BufWrite','BufWrite'),
+ ('BufWriteCmd','BufWriteCmd'),
+ ('BufWritePost','BufWritePost'),
+ ('BufWritePre','BufWritePre'),
+ ('Cmd','Cmd'),
+ ('CmdwinEnter','CmdwinEnter'),
+ ('CmdwinLeave','CmdwinLeave'),
+ ('ColorScheme','ColorScheme'),
+ ('CompleteDone','CompleteDone'),
+ ('CursorHold','CursorHold'),
+ ('CursorHoldI','CursorHoldI'),
+ ('CursorMoved','CursorMoved'),
+ ('CursorMovedI','CursorMovedI'),
+ ('EncodingChanged','EncodingChanged'),
+ ('FileAppendCmd','FileAppendCmd'),
+ ('FileAppendPost','FileAppendPost'),
+ ('FileAppendPre','FileAppendPre'),
+ ('FileChangedRO','FileChangedRO'),
+ ('FileChangedShell','FileChangedShell'),
+ ('FileChangedShellPost','FileChangedShellPost'),
+ ('FileEncoding','FileEncoding'),
+ ('FileReadCmd','FileReadCmd'),
+ ('FileReadPost','FileReadPost'),
+ ('FileReadPre','FileReadPre'),
+ ('FileType','FileType'),
+ ('FileWriteCmd','FileWriteCmd'),
+ ('FileWritePost','FileWritePost'),
+ ('FileWritePre','FileWritePre'),
+ ('FilterReadPost','FilterReadPost'),
+ ('FilterReadPre','FilterReadPre'),
+ ('FilterWritePost','FilterWritePost'),
+ ('FilterWritePre','FilterWritePre'),
+ ('FocusGained','FocusGained'),
+ ('FocusLost','FocusLost'),
+ ('FuncUndefined','FuncUndefined'),
+ ('GUIEnter','GUIEnter'),
+ ('GUIFailed','GUIFailed'),
+ ('InsertChange','InsertChange'),
+ ('InsertCharPre','InsertCharPre'),
+ ('InsertEnter','InsertEnter'),
+ ('InsertLeave','InsertLeave'),
+ ('MenuPopup','MenuPopup'),
+ ('QuickFixCmdPost','QuickFixCmdPost'),
+ ('QuickFixCmdPre','QuickFixCmdPre'),
+ ('QuitPre','QuitPre'),
+ ('RemoteReply','RemoteReply'),
+ ('SessionLoadPost','SessionLoadPost'),
+ ('ShellCmdPost','ShellCmdPost'),
+ ('ShellFilterPost','ShellFilterPost'),
+ ('SourceCmd','SourceCmd'),
+ ('SourcePre','SourcePre'),
+ ('SpellFileMissing','SpellFileMissing'),
+ ('StdinReadPost','StdinReadPost'),
+ ('StdinReadPre','StdinReadPre'),
+ ('SwapExists','SwapExists'),
+ ('Syntax','Syntax'),
+ ('TabEnter','TabEnter'),
+ ('TabLeave','TabLeave'),
+ ('TermChanged','TermChanged'),
+ ('TermResponse','TermResponse'),
+ ('TextChanged','TextChanged'),
+ ('TextChangedI','TextChangedI'),
+ ('User','User'),
+ ('UserGettingBored','UserGettingBored'),
+ ('VimEnter','VimEnter'),
+ ('VimLeave','VimLeave'),
+ ('VimLeavePre','VimLeavePre'),
+ ('VimResized','VimResized'),
+ ('WinEnter','WinEnter'),
+ ('WinLeave','WinLeave'),
+ ('event','event'),
+ )
+ return var
+auto = _getauto()
+
+def _getcommand():
+ var = (
+ ('a','a'),
+ ('ab','ab'),
+ ('abc','abclear'),
+ ('abo','aboveleft'),
+ ('al','all'),
+ ('ar','ar'),
+ ('ar','args'),
+ ('arga','argadd'),
+ ('argd','argdelete'),
+ ('argdo','argdo'),
+ ('arge','argedit'),
+ ('argg','argglobal'),
+ ('argl','arglocal'),
+ ('argu','argument'),
+ ('as','ascii'),
+ ('au','au'),
+ ('b','buffer'),
+ ('bN','bNext'),
+ ('ba','ball'),
+ ('bad','badd'),
+ ('bd','bdelete'),
+ ('bel','belowright'),
+ ('bf','bfirst'),
+ ('bl','blast'),
+ ('bm','bmodified'),
+ ('bn','bnext'),
+ ('bo','botright'),
+ ('bp','bprevious'),
+ ('br','br'),
+ ('br','brewind'),
+ ('brea','break'),
+ ('breaka','breakadd'),
+ ('breakd','breakdel'),
+ ('breakl','breaklist'),
+ ('bro','browse'),
+ ('bu','bu'),
+ ('buf','buf'),
+ ('bufdo','bufdo'),
+ ('buffers','buffers'),
+ ('bun','bunload'),
+ ('bw','bwipeout'),
+ ('c','c'),
+ ('c','change'),
+ ('cN','cN'),
+ ('cN','cNext'),
+ ('cNf','cNf'),
+ ('cNf','cNfile'),
+ ('cabc','cabclear'),
+ ('cad','cad'),
+ ('cad','caddexpr'),
+ ('caddb','caddbuffer'),
+ ('caddf','caddfile'),
+ ('cal','call'),
+ ('cat','catch'),
+ ('cb','cbuffer'),
+ ('cc','cc'),
+ ('ccl','cclose'),
+ ('cd','cd'),
+ ('ce','center'),
+ ('cex','cexpr'),
+ ('cf','cfile'),
+ ('cfir','cfirst'),
+ ('cg','cgetfile'),
+ ('cgetb','cgetbuffer'),
+ ('cgete','cgetexpr'),
+ ('changes','changes'),
+ ('chd','chdir'),
+ ('che','checkpath'),
+ ('checkt','checktime'),
+ ('cl','cl'),
+ ('cl','clist'),
+ ('cla','clast'),
+ ('clo','close'),
+ ('cmapc','cmapclear'),
+ ('cn','cn'),
+ ('cn','cnext'),
+ ('cnew','cnewer'),
+ ('cnf','cnf'),
+ ('cnf','cnfile'),
+ ('co','copy'),
+ ('col','colder'),
+ ('colo','colorscheme'),
+ ('com','com'),
+ ('comc','comclear'),
+ ('comp','compiler'),
+ ('con','con'),
+ ('con','continue'),
+ ('conf','confirm'),
+ ('cope','copen'),
+ ('cp','cprevious'),
+ ('cpf','cpfile'),
+ ('cq','cquit'),
+ ('cr','crewind'),
+ ('cs','cs'),
+ ('cscope','cscope'),
+ ('cstag','cstag'),
+ ('cuna','cunabbrev'),
+ ('cw','cwindow'),
+ ('d','d'),
+ ('d','delete'),
+ ('de','de'),
+ ('debug','debug'),
+ ('debugg','debuggreedy'),
+ ('del','del'),
+ ('delc','delcommand'),
+ ('delel','delel'),
+ ('delep','delep'),
+ ('deletel','deletel'),
+ ('deletep','deletep'),
+ ('deletl','deletl'),
+ ('deletp','deletp'),
+ ('delf','delf'),
+ ('delf','delfunction'),
+ ('dell','dell'),
+ ('delm','delmarks'),
+ ('delp','delp'),
+ ('dep','dep'),
+ ('di','di'),
+ ('di','display'),
+ ('diffg','diffget'),
+ ('diffo','diffoff'),
+ ('diffp','diffpatch'),
+ ('diffpu','diffput'),
+ ('diffs','diffsplit'),
+ ('difft','diffthis'),
+ ('diffu','diffupdate'),
+ ('dig','dig'),
+ ('dig','digraphs'),
+ ('dir','dir'),
+ ('dj','djump'),
+ ('dl','dl'),
+ ('dli','dlist'),
+ ('do','do'),
+ ('doau','doau'),
+ ('dp','dp'),
+ ('dr','drop'),
+ ('ds','dsearch'),
+ ('dsp','dsplit'),
+ ('e','e'),
+ ('e','edit'),
+ ('ea','ea'),
+ ('earlier','earlier'),
+ ('ec','ec'),
+ ('echoe','echoerr'),
+ ('echom','echomsg'),
+ ('echon','echon'),
+ ('el','else'),
+ ('elsei','elseif'),
+ ('em','emenu'),
+ ('en','en'),
+ ('en','endif'),
+ ('endf','endf'),
+ ('endf','endfunction'),
+ ('endfo','endfor'),
+ ('endfun','endfun'),
+ ('endt','endtry'),
+ ('endw','endwhile'),
+ ('ene','enew'),
+ ('ex','ex'),
+ ('exi','exit'),
+ ('exu','exusage'),
+ ('f','f'),
+ ('f','file'),
+ ('files','files'),
+ ('filet','filet'),
+ ('filetype','filetype'),
+ ('fin','fin'),
+ ('fin','find'),
+ ('fina','finally'),
+ ('fini','finish'),
+ ('fir','first'),
+ ('fix','fixdel'),
+ ('fo','fold'),
+ ('foldc','foldclose'),
+ ('foldd','folddoopen'),
+ ('folddoc','folddoclosed'),
+ ('foldo','foldopen'),
+ ('for','for'),
+ ('fu','fu'),
+ ('fu','function'),
+ ('fun','fun'),
+ ('g','g'),
+ ('go','goto'),
+ ('gr','grep'),
+ ('grepa','grepadd'),
+ ('gui','gui'),
+ ('gvim','gvim'),
+ ('h','h'),
+ ('h','help'),
+ ('ha','hardcopy'),
+ ('helpf','helpfind'),
+ ('helpg','helpgrep'),
+ ('helpt','helptags'),
+ ('hi','hi'),
+ ('hid','hide'),
+ ('his','history'),
+ ('i','i'),
+ ('ia','ia'),
+ ('iabc','iabclear'),
+ ('if','if'),
+ ('ij','ijump'),
+ ('il','ilist'),
+ ('imapc','imapclear'),
+ ('in','in'),
+ ('intro','intro'),
+ ('is','isearch'),
+ ('isp','isplit'),
+ ('iuna','iunabbrev'),
+ ('j','join'),
+ ('ju','jumps'),
+ ('k','k'),
+ ('kee','keepmarks'),
+ ('keepa','keepa'),
+ ('keepalt','keepalt'),
+ ('keepj','keepjumps'),
+ ('keepp','keeppatterns'),
+ ('l','l'),
+ ('l','list'),
+ ('lN','lN'),
+ ('lN','lNext'),
+ ('lNf','lNf'),
+ ('lNf','lNfile'),
+ ('la','la'),
+ ('la','last'),
+ ('lad','lad'),
+ ('lad','laddexpr'),
+ ('laddb','laddbuffer'),
+ ('laddf','laddfile'),
+ ('lan','lan'),
+ ('lan','language'),
+ ('lat','lat'),
+ ('later','later'),
+ ('lb','lbuffer'),
+ ('lc','lcd'),
+ ('lch','lchdir'),
+ ('lcl','lclose'),
+ ('lcs','lcs'),
+ ('lcscope','lcscope'),
+ ('le','left'),
+ ('lefta','leftabove'),
+ ('lex','lexpr'),
+ ('lf','lfile'),
+ ('lfir','lfirst'),
+ ('lg','lgetfile'),
+ ('lgetb','lgetbuffer'),
+ ('lgete','lgetexpr'),
+ ('lgr','lgrep'),
+ ('lgrepa','lgrepadd'),
+ ('lh','lhelpgrep'),
+ ('ll','ll'),
+ ('lla','llast'),
+ ('lli','llist'),
+ ('lmak','lmake'),
+ ('lmapc','lmapclear'),
+ ('lne','lne'),
+ ('lne','lnext'),
+ ('lnew','lnewer'),
+ ('lnf','lnf'),
+ ('lnf','lnfile'),
+ ('lo','lo'),
+ ('lo','loadview'),
+ ('loadk','loadk'),
+ ('loadkeymap','loadkeymap'),
+ ('loc','lockmarks'),
+ ('lockv','lockvar'),
+ ('lol','lolder'),
+ ('lop','lopen'),
+ ('lp','lprevious'),
+ ('lpf','lpfile'),
+ ('lr','lrewind'),
+ ('ls','ls'),
+ ('lt','ltag'),
+ ('lua','lua'),
+ ('luado','luado'),
+ ('luafile','luafile'),
+ ('lv','lvimgrep'),
+ ('lvimgrepa','lvimgrepadd'),
+ ('lw','lwindow'),
+ ('m','move'),
+ ('ma','ma'),
+ ('ma','mark'),
+ ('mak','make'),
+ ('marks','marks'),
+ ('mat','match'),
+ ('menut','menut'),
+ ('menut','menutranslate'),
+ ('mes','mes'),
+ ('messages','messages'),
+ ('mk','mk'),
+ ('mk','mkexrc'),
+ ('mks','mksession'),
+ ('mksp','mkspell'),
+ ('mkv','mkv'),
+ ('mkv','mkvimrc'),
+ ('mkvie','mkview'),
+ ('mo','mo'),
+ ('mod','mode'),
+ ('mz','mz'),
+ ('mz','mzscheme'),
+ ('mzf','mzfile'),
+ ('n','n'),
+ ('n','next'),
+ ('nb','nbkey'),
+ ('nbc','nbclose'),
+ ('nbs','nbstart'),
+ ('ne','ne'),
+ ('new','new'),
+ ('nmapc','nmapclear'),
+ ('noa','noa'),
+ ('noautocmd','noautocmd'),
+ ('noh','nohlsearch'),
+ ('nu','number'),
+ ('o','o'),
+ ('o','open'),
+ ('ol','oldfiles'),
+ ('omapc','omapclear'),
+ ('on','only'),
+ ('opt','options'),
+ ('ownsyntax','ownsyntax'),
+ ('p','p'),
+ ('p','print'),
+ ('pc','pclose'),
+ ('pe','pe'),
+ ('pe','perl'),
+ ('ped','pedit'),
+ ('perld','perldo'),
+ ('po','pop'),
+ ('popu','popu'),
+ ('popu','popup'),
+ ('pp','ppop'),
+ ('pr','pr'),
+ ('pre','preserve'),
+ ('prev','previous'),
+ ('pro','pro'),
+ ('prof','profile'),
+ ('profd','profdel'),
+ ('promptf','promptfind'),
+ ('promptr','promptrepl'),
+ ('ps','psearch'),
+ ('ptN','ptN'),
+ ('ptN','ptNext'),
+ ('pta','ptag'),
+ ('ptf','ptfirst'),
+ ('ptj','ptjump'),
+ ('ptl','ptlast'),
+ ('ptn','ptn'),
+ ('ptn','ptnext'),
+ ('ptp','ptprevious'),
+ ('ptr','ptrewind'),
+ ('pts','ptselect'),
+ ('pu','put'),
+ ('pw','pwd'),
+ ('py','py'),
+ ('py','python'),
+ ('py3','py3'),
+ ('py3','py3'),
+ ('py3do','py3do'),
+ ('pydo','pydo'),
+ ('pyf','pyfile'),
+ ('python3','python3'),
+ ('q','q'),
+ ('q','quit'),
+ ('qa','qall'),
+ ('quita','quitall'),
+ ('r','r'),
+ ('r','read'),
+ ('re','re'),
+ ('rec','recover'),
+ ('red','red'),
+ ('red','redo'),
+ ('redi','redir'),
+ ('redr','redraw'),
+ ('redraws','redrawstatus'),
+ ('reg','registers'),
+ ('res','resize'),
+ ('ret','retab'),
+ ('retu','return'),
+ ('rew','rewind'),
+ ('ri','right'),
+ ('rightb','rightbelow'),
+ ('ru','ru'),
+ ('ru','runtime'),
+ ('rub','ruby'),
+ ('rubyd','rubydo'),
+ ('rubyf','rubyfile'),
+ ('rundo','rundo'),
+ ('rv','rviminfo'),
+ ('sN','sNext'),
+ ('sa','sargument'),
+ ('sal','sall'),
+ ('san','sandbox'),
+ ('sav','saveas'),
+ ('sb','sbuffer'),
+ ('sbN','sbNext'),
+ ('sba','sball'),
+ ('sbf','sbfirst'),
+ ('sbl','sblast'),
+ ('sbm','sbmodified'),
+ ('sbn','sbnext'),
+ ('sbp','sbprevious'),
+ ('sbr','sbrewind'),
+ ('scrip','scrip'),
+ ('scrip','scriptnames'),
+ ('scripte','scriptencoding'),
+ ('scs','scs'),
+ ('scscope','scscope'),
+ ('se','set'),
+ ('setf','setfiletype'),
+ ('setg','setglobal'),
+ ('setl','setlocal'),
+ ('sf','sfind'),
+ ('sfir','sfirst'),
+ ('sh','shell'),
+ ('si','si'),
+ ('sig','sig'),
+ ('sign','sign'),
+ ('sil','silent'),
+ ('sim','simalt'),
+ ('sl','sl'),
+ ('sl','sleep'),
+ ('sla','slast'),
+ ('sm','smagic'),
+ ('sm','smap'),
+ ('sme','sme'),
+ ('smenu','smenu'),
+ ('sn','snext'),
+ ('sni','sniff'),
+ ('sno','snomagic'),
+ ('snoreme','snoreme'),
+ ('snoremenu','snoremenu'),
+ ('so','so'),
+ ('so','source'),
+ ('sor','sort'),
+ ('sp','split'),
+ ('spe','spe'),
+ ('spe','spellgood'),
+ ('spelld','spelldump'),
+ ('spelli','spellinfo'),
+ ('spellr','spellrepall'),
+ ('spellu','spellundo'),
+ ('spellw','spellwrong'),
+ ('spr','sprevious'),
+ ('sre','srewind'),
+ ('st','st'),
+ ('st','stop'),
+ ('sta','stag'),
+ ('star','star'),
+ ('star','startinsert'),
+ ('start','start'),
+ ('startg','startgreplace'),
+ ('startr','startreplace'),
+ ('stj','stjump'),
+ ('stopi','stopinsert'),
+ ('sts','stselect'),
+ ('sun','sunhide'),
+ ('sunme','sunme'),
+ ('sunmenu','sunmenu'),
+ ('sus','suspend'),
+ ('sv','sview'),
+ ('sw','swapname'),
+ ('sy','sy'),
+ ('syn','syn'),
+ ('sync','sync'),
+ ('syncbind','syncbind'),
+ ('syntime','syntime'),
+ ('t','t'),
+ ('tN','tN'),
+ ('tN','tNext'),
+ ('ta','ta'),
+ ('ta','tag'),
+ ('tab','tab'),
+ ('tabN','tabN'),
+ ('tabN','tabNext'),
+ ('tabc','tabclose'),
+ ('tabd','tabdo'),
+ ('tabe','tabedit'),
+ ('tabf','tabfind'),
+ ('tabfir','tabfirst'),
+ ('tabl','tablast'),
+ ('tabm','tabmove'),
+ ('tabn','tabnext'),
+ ('tabnew','tabnew'),
+ ('tabo','tabonly'),
+ ('tabp','tabprevious'),
+ ('tabr','tabrewind'),
+ ('tabs','tabs'),
+ ('tags','tags'),
+ ('tc','tcl'),
+ ('tcld','tcldo'),
+ ('tclf','tclfile'),
+ ('te','tearoff'),
+ ('tf','tfirst'),
+ ('th','throw'),
+ ('tj','tjump'),
+ ('tl','tlast'),
+ ('tm','tm'),
+ ('tm','tmenu'),
+ ('tn','tn'),
+ ('tn','tnext'),
+ ('to','topleft'),
+ ('tp','tprevious'),
+ ('tr','tr'),
+ ('tr','trewind'),
+ ('try','try'),
+ ('ts','tselect'),
+ ('tu','tu'),
+ ('tu','tunmenu'),
+ ('u','u'),
+ ('u','undo'),
+ ('un','un'),
+ ('una','unabbreviate'),
+ ('undoj','undojoin'),
+ ('undol','undolist'),
+ ('unh','unhide'),
+ ('unl','unl'),
+ ('unlo','unlockvar'),
+ ('uns','unsilent'),
+ ('up','update'),
+ ('v','v'),
+ ('ve','ve'),
+ ('ve','version'),
+ ('verb','verbose'),
+ ('vert','vertical'),
+ ('vi','vi'),
+ ('vi','visual'),
+ ('vie','view'),
+ ('vim','vimgrep'),
+ ('vimgrepa','vimgrepadd'),
+ ('viu','viusage'),
+ ('vmapc','vmapclear'),
+ ('vne','vnew'),
+ ('vs','vsplit'),
+ ('w','w'),
+ ('w','write'),
+ ('wN','wNext'),
+ ('wa','wall'),
+ ('wh','while'),
+ ('win','win'),
+ ('win','winsize'),
+ ('winc','wincmd'),
+ ('windo','windo'),
+ ('winp','winpos'),
+ ('wn','wnext'),
+ ('wp','wprevious'),
+ ('wq','wq'),
+ ('wqa','wqall'),
+ ('ws','wsverb'),
+ ('wundo','wundo'),
+ ('wv','wviminfo'),
+ ('x','x'),
+ ('x','xit'),
+ ('xa','xall'),
+ ('xmapc','xmapclear'),
+ ('xme','xme'),
+ ('xmenu','xmenu'),
+ ('xnoreme','xnoreme'),
+ ('xnoremenu','xnoremenu'),
+ ('xunme','xunme'),
+ ('xunmenu','xunmenu'),
+ ('xwininfo','xwininfo'),
+ ('y','yank'),
+ )
+ return var
+command = _getcommand()
+
+def _getoption():
+ var = (
+ ('acd','acd'),
+ ('ai','ai'),
+ ('akm','akm'),
+ ('al','al'),
+ ('aleph','aleph'),
+ ('allowrevins','allowrevins'),
+ ('altkeymap','altkeymap'),
+ ('ambiwidth','ambiwidth'),
+ ('ambw','ambw'),
+ ('anti','anti'),
+ ('antialias','antialias'),
+ ('ar','ar'),
+ ('arab','arab'),
+ ('arabic','arabic'),
+ ('arabicshape','arabicshape'),
+ ('ari','ari'),
+ ('arshape','arshape'),
+ ('autochdir','autochdir'),
+ ('autoindent','autoindent'),
+ ('autoread','autoread'),
+ ('autowrite','autowrite'),
+ ('autowriteall','autowriteall'),
+ ('aw','aw'),
+ ('awa','awa'),
+ ('background','background'),
+ ('backspace','backspace'),
+ ('backup','backup'),
+ ('backupcopy','backupcopy'),
+ ('backupdir','backupdir'),
+ ('backupext','backupext'),
+ ('backupskip','backupskip'),
+ ('balloondelay','balloondelay'),
+ ('ballooneval','ballooneval'),
+ ('balloonexpr','balloonexpr'),
+ ('bdir','bdir'),
+ ('bdlay','bdlay'),
+ ('beval','beval'),
+ ('bex','bex'),
+ ('bexpr','bexpr'),
+ ('bg','bg'),
+ ('bh','bh'),
+ ('bin','bin'),
+ ('binary','binary'),
+ ('biosk','biosk'),
+ ('bioskey','bioskey'),
+ ('bk','bk'),
+ ('bkc','bkc'),
+ ('bl','bl'),
+ ('bomb','bomb'),
+ ('breakat','breakat'),
+ ('brk','brk'),
+ ('browsedir','browsedir'),
+ ('bs','bs'),
+ ('bsdir','bsdir'),
+ ('bsk','bsk'),
+ ('bt','bt'),
+ ('bufhidden','bufhidden'),
+ ('buflisted','buflisted'),
+ ('buftype','buftype'),
+ ('casemap','casemap'),
+ ('cb','cb'),
+ ('cc','cc'),
+ ('ccv','ccv'),
+ ('cd','cd'),
+ ('cdpath','cdpath'),
+ ('cedit','cedit'),
+ ('cf','cf'),
+ ('cfu','cfu'),
+ ('ch','ch'),
+ ('charconvert','charconvert'),
+ ('ci','ci'),
+ ('cin','cin'),
+ ('cindent','cindent'),
+ ('cink','cink'),
+ ('cinkeys','cinkeys'),
+ ('cino','cino'),
+ ('cinoptions','cinoptions'),
+ ('cinw','cinw'),
+ ('cinwords','cinwords'),
+ ('clipboard','clipboard'),
+ ('cmdheight','cmdheight'),
+ ('cmdwinheight','cmdwinheight'),
+ ('cmp','cmp'),
+ ('cms','cms'),
+ ('co','co'),
+ ('cocu','cocu'),
+ ('cole','cole'),
+ ('colorcolumn','colorcolumn'),
+ ('columns','columns'),
+ ('com','com'),
+ ('comments','comments'),
+ ('commentstring','commentstring'),
+ ('compatible','compatible'),
+ ('complete','complete'),
+ ('completefunc','completefunc'),
+ ('completeopt','completeopt'),
+ ('concealcursor','concealcursor'),
+ ('conceallevel','conceallevel'),
+ ('confirm','confirm'),
+ ('consk','consk'),
+ ('conskey','conskey'),
+ ('copyindent','copyindent'),
+ ('cot','cot'),
+ ('cp','cp'),
+ ('cpo','cpo'),
+ ('cpoptions','cpoptions'),
+ ('cpt','cpt'),
+ ('crb','crb'),
+ ('cryptmethod','cryptmethod'),
+ ('cscopepathcomp','cscopepathcomp'),
+ ('cscopeprg','cscopeprg'),
+ ('cscopequickfix','cscopequickfix'),
+ ('cscoperelative','cscoperelative'),
+ ('cscopetag','cscopetag'),
+ ('cscopetagorder','cscopetagorder'),
+ ('cscopeverbose','cscopeverbose'),
+ ('cspc','cspc'),
+ ('csprg','csprg'),
+ ('csqf','csqf'),
+ ('csre','csre'),
+ ('cst','cst'),
+ ('csto','csto'),
+ ('csverb','csverb'),
+ ('cuc','cuc'),
+ ('cul','cul'),
+ ('cursorbind','cursorbind'),
+ ('cursorcolumn','cursorcolumn'),
+ ('cursorline','cursorline'),
+ ('cwh','cwh'),
+ ('debug','debug'),
+ ('deco','deco'),
+ ('def','def'),
+ ('define','define'),
+ ('delcombine','delcombine'),
+ ('dex','dex'),
+ ('dg','dg'),
+ ('dict','dict'),
+ ('dictionary','dictionary'),
+ ('diff','diff'),
+ ('diffexpr','diffexpr'),
+ ('diffopt','diffopt'),
+ ('digraph','digraph'),
+ ('dip','dip'),
+ ('dir','dir'),
+ ('directory','directory'),
+ ('display','display'),
+ ('dy','dy'),
+ ('ea','ea'),
+ ('ead','ead'),
+ ('eadirection','eadirection'),
+ ('eb','eb'),
+ ('ed','ed'),
+ ('edcompatible','edcompatible'),
+ ('ef','ef'),
+ ('efm','efm'),
+ ('ei','ei'),
+ ('ek','ek'),
+ ('enc','enc'),
+ ('encoding','encoding'),
+ ('endofline','endofline'),
+ ('eol','eol'),
+ ('ep','ep'),
+ ('equalalways','equalalways'),
+ ('equalprg','equalprg'),
+ ('errorbells','errorbells'),
+ ('errorfile','errorfile'),
+ ('errorformat','errorformat'),
+ ('esckeys','esckeys'),
+ ('et','et'),
+ ('eventignore','eventignore'),
+ ('ex','ex'),
+ ('expandtab','expandtab'),
+ ('exrc','exrc'),
+ ('fcl','fcl'),
+ ('fcs','fcs'),
+ ('fdc','fdc'),
+ ('fde','fde'),
+ ('fdi','fdi'),
+ ('fdl','fdl'),
+ ('fdls','fdls'),
+ ('fdm','fdm'),
+ ('fdn','fdn'),
+ ('fdo','fdo'),
+ ('fdt','fdt'),
+ ('fen','fen'),
+ ('fenc','fenc'),
+ ('fencs','fencs'),
+ ('fex','fex'),
+ ('ff','ff'),
+ ('ffs','ffs'),
+ ('fic','fic'),
+ ('fileencoding','fileencoding'),
+ ('fileencodings','fileencodings'),
+ ('fileformat','fileformat'),
+ ('fileformats','fileformats'),
+ ('fileignorecase','fileignorecase'),
+ ('filetype','filetype'),
+ ('fillchars','fillchars'),
+ ('fk','fk'),
+ ('fkmap','fkmap'),
+ ('flp','flp'),
+ ('fml','fml'),
+ ('fmr','fmr'),
+ ('fo','fo'),
+ ('foldclose','foldclose'),
+ ('foldcolumn','foldcolumn'),
+ ('foldenable','foldenable'),
+ ('foldexpr','foldexpr'),
+ ('foldignore','foldignore'),
+ ('foldlevel','foldlevel'),
+ ('foldlevelstart','foldlevelstart'),
+ ('foldmarker','foldmarker'),
+ ('foldmethod','foldmethod'),
+ ('foldminlines','foldminlines'),
+ ('foldnestmax','foldnestmax'),
+ ('foldopen','foldopen'),
+ ('foldtext','foldtext'),
+ ('formatexpr','formatexpr'),
+ ('formatlistpat','formatlistpat'),
+ ('formatoptions','formatoptions'),
+ ('formatprg','formatprg'),
+ ('fp','fp'),
+ ('fs','fs'),
+ ('fsync','fsync'),
+ ('ft','ft'),
+ ('gcr','gcr'),
+ ('gd','gd'),
+ ('gdefault','gdefault'),
+ ('gfm','gfm'),
+ ('gfn','gfn'),
+ ('gfs','gfs'),
+ ('gfw','gfw'),
+ ('ghr','ghr'),
+ ('go','go'),
+ ('gp','gp'),
+ ('grepformat','grepformat'),
+ ('grepprg','grepprg'),
+ ('gtl','gtl'),
+ ('gtt','gtt'),
+ ('guicursor','guicursor'),
+ ('guifont','guifont'),
+ ('guifontset','guifontset'),
+ ('guifontwide','guifontwide'),
+ ('guiheadroom','guiheadroom'),
+ ('guioptions','guioptions'),
+ ('guipty','guipty'),
+ ('guitablabel','guitablabel'),
+ ('guitabtooltip','guitabtooltip'),
+ ('helpfile','helpfile'),
+ ('helpheight','helpheight'),
+ ('helplang','helplang'),
+ ('hf','hf'),
+ ('hh','hh'),
+ ('hi','hi'),
+ ('hid','hid'),
+ ('hidden','hidden'),
+ ('highlight','highlight'),
+ ('history','history'),
+ ('hk','hk'),
+ ('hkmap','hkmap'),
+ ('hkmapp','hkmapp'),
+ ('hkp','hkp'),
+ ('hl','hl'),
+ ('hlg','hlg'),
+ ('hls','hls'),
+ ('hlsearch','hlsearch'),
+ ('ic','ic'),
+ ('icon','icon'),
+ ('iconstring','iconstring'),
+ ('ignorecase','ignorecase'),
+ ('im','im'),
+ ('imactivatefunc','imactivatefunc'),
+ ('imactivatekey','imactivatekey'),
+ ('imaf','imaf'),
+ ('imak','imak'),
+ ('imc','imc'),
+ ('imcmdline','imcmdline'),
+ ('imd','imd'),
+ ('imdisable','imdisable'),
+ ('imi','imi'),
+ ('iminsert','iminsert'),
+ ('ims','ims'),
+ ('imsearch','imsearch'),
+ ('imsf','imsf'),
+ ('imstatusfunc','imstatusfunc'),
+ ('inc','inc'),
+ ('include','include'),
+ ('includeexpr','includeexpr'),
+ ('incsearch','incsearch'),
+ ('inde','inde'),
+ ('indentexpr','indentexpr'),
+ ('indentkeys','indentkeys'),
+ ('indk','indk'),
+ ('inex','inex'),
+ ('inf','inf'),
+ ('infercase','infercase'),
+ ('inoremap','inoremap'),
+ ('insertmode','insertmode'),
+ ('invacd','invacd'),
+ ('invai','invai'),
+ ('invakm','invakm'),
+ ('invallowrevins','invallowrevins'),
+ ('invaltkeymap','invaltkeymap'),
+ ('invanti','invanti'),
+ ('invantialias','invantialias'),
+ ('invar','invar'),
+ ('invarab','invarab'),
+ ('invarabic','invarabic'),
+ ('invarabicshape','invarabicshape'),
+ ('invari','invari'),
+ ('invarshape','invarshape'),
+ ('invautochdir','invautochdir'),
+ ('invautoindent','invautoindent'),
+ ('invautoread','invautoread'),
+ ('invautowrite','invautowrite'),
+ ('invautowriteall','invautowriteall'),
+ ('invaw','invaw'),
+ ('invawa','invawa'),
+ ('invbackup','invbackup'),
+ ('invballooneval','invballooneval'),
+ ('invbeval','invbeval'),
+ ('invbin','invbin'),
+ ('invbinary','invbinary'),
+ ('invbiosk','invbiosk'),
+ ('invbioskey','invbioskey'),
+ ('invbk','invbk'),
+ ('invbl','invbl'),
+ ('invbomb','invbomb'),
+ ('invbuflisted','invbuflisted'),
+ ('invcf','invcf'),
+ ('invci','invci'),
+ ('invcin','invcin'),
+ ('invcindent','invcindent'),
+ ('invcompatible','invcompatible'),
+ ('invconfirm','invconfirm'),
+ ('invconsk','invconsk'),
+ ('invconskey','invconskey'),
+ ('invcopyindent','invcopyindent'),
+ ('invcp','invcp'),
+ ('invcrb','invcrb'),
+ ('invcscoperelative','invcscoperelative'),
+ ('invcscopetag','invcscopetag'),
+ ('invcscopeverbose','invcscopeverbose'),
+ ('invcsre','invcsre'),
+ ('invcst','invcst'),
+ ('invcsverb','invcsverb'),
+ ('invcuc','invcuc'),
+ ('invcul','invcul'),
+ ('invcursorbind','invcursorbind'),
+ ('invcursorcolumn','invcursorcolumn'),
+ ('invcursorline','invcursorline'),
+ ('invdeco','invdeco'),
+ ('invdelcombine','invdelcombine'),
+ ('invdg','invdg'),
+ ('invdiff','invdiff'),
+ ('invdigraph','invdigraph'),
+ ('invea','invea'),
+ ('inveb','inveb'),
+ ('inved','inved'),
+ ('invedcompatible','invedcompatible'),
+ ('invek','invek'),
+ ('invendofline','invendofline'),
+ ('inveol','inveol'),
+ ('invequalalways','invequalalways'),
+ ('inverrorbells','inverrorbells'),
+ ('invesckeys','invesckeys'),
+ ('invet','invet'),
+ ('invex','invex'),
+ ('invexpandtab','invexpandtab'),
+ ('invexrc','invexrc'),
+ ('invfen','invfen'),
+ ('invfic','invfic'),
+ ('invfileignorecase','invfileignorecase'),
+ ('invfk','invfk'),
+ ('invfkmap','invfkmap'),
+ ('invfoldenable','invfoldenable'),
+ ('invgd','invgd'),
+ ('invgdefault','invgdefault'),
+ ('invguipty','invguipty'),
+ ('invhid','invhid'),
+ ('invhidden','invhidden'),
+ ('invhk','invhk'),
+ ('invhkmap','invhkmap'),
+ ('invhkmapp','invhkmapp'),
+ ('invhkp','invhkp'),
+ ('invhls','invhls'),
+ ('invhlsearch','invhlsearch'),
+ ('invic','invic'),
+ ('invicon','invicon'),
+ ('invignorecase','invignorecase'),
+ ('invim','invim'),
+ ('invimc','invimc'),
+ ('invimcmdline','invimcmdline'),
+ ('invimd','invimd'),
+ ('invimdisable','invimdisable'),
+ ('invincsearch','invincsearch'),
+ ('invinf','invinf'),
+ ('invinfercase','invinfercase'),
+ ('invinsertmode','invinsertmode'),
+ ('invis','invis'),
+ ('invjoinspaces','invjoinspaces'),
+ ('invjs','invjs'),
+ ('invlazyredraw','invlazyredraw'),
+ ('invlbr','invlbr'),
+ ('invlinebreak','invlinebreak'),
+ ('invlisp','invlisp'),
+ ('invlist','invlist'),
+ ('invloadplugins','invloadplugins'),
+ ('invlpl','invlpl'),
+ ('invlz','invlz'),
+ ('invma','invma'),
+ ('invmacatsui','invmacatsui'),
+ ('invmagic','invmagic'),
+ ('invmh','invmh'),
+ ('invml','invml'),
+ ('invmod','invmod'),
+ ('invmodeline','invmodeline'),
+ ('invmodifiable','invmodifiable'),
+ ('invmodified','invmodified'),
+ ('invmore','invmore'),
+ ('invmousef','invmousef'),
+ ('invmousefocus','invmousefocus'),
+ ('invmousehide','invmousehide'),
+ ('invnu','invnu'),
+ ('invnumber','invnumber'),
+ ('invodev','invodev'),
+ ('invopendevice','invopendevice'),
+ ('invpaste','invpaste'),
+ ('invpi','invpi'),
+ ('invpreserveindent','invpreserveindent'),
+ ('invpreviewwindow','invpreviewwindow'),
+ ('invprompt','invprompt'),
+ ('invpvw','invpvw'),
+ ('invreadonly','invreadonly'),
+ ('invrelativenumber','invrelativenumber'),
+ ('invremap','invremap'),
+ ('invrestorescreen','invrestorescreen'),
+ ('invrevins','invrevins'),
+ ('invri','invri'),
+ ('invrightleft','invrightleft'),
+ ('invrl','invrl'),
+ ('invrnu','invrnu'),
+ ('invro','invro'),
+ ('invrs','invrs'),
+ ('invru','invru'),
+ ('invruler','invruler'),
+ ('invsb','invsb'),
+ ('invsc','invsc'),
+ ('invscb','invscb'),
+ ('invscrollbind','invscrollbind'),
+ ('invscs','invscs'),
+ ('invsecure','invsecure'),
+ ('invsft','invsft'),
+ ('invshellslash','invshellslash'),
+ ('invshelltemp','invshelltemp'),
+ ('invshiftround','invshiftround'),
+ ('invshortname','invshortname'),
+ ('invshowcmd','invshowcmd'),
+ ('invshowfulltag','invshowfulltag'),
+ ('invshowmatch','invshowmatch'),
+ ('invshowmode','invshowmode'),
+ ('invsi','invsi'),
+ ('invsm','invsm'),
+ ('invsmartcase','invsmartcase'),
+ ('invsmartindent','invsmartindent'),
+ ('invsmarttab','invsmarttab'),
+ ('invsmd','invsmd'),
+ ('invsn','invsn'),
+ ('invsol','invsol'),
+ ('invspell','invspell'),
+ ('invsplitbelow','invsplitbelow'),
+ ('invsplitright','invsplitright'),
+ ('invspr','invspr'),
+ ('invsr','invsr'),
+ ('invssl','invssl'),
+ ('invsta','invsta'),
+ ('invstartofline','invstartofline'),
+ ('invstmp','invstmp'),
+ ('invswapfile','invswapfile'),
+ ('invswf','invswf'),
+ ('invta','invta'),
+ ('invtagbsearch','invtagbsearch'),
+ ('invtagrelative','invtagrelative'),
+ ('invtagstack','invtagstack'),
+ ('invtbi','invtbi'),
+ ('invtbidi','invtbidi'),
+ ('invtbs','invtbs'),
+ ('invtermbidi','invtermbidi'),
+ ('invterse','invterse'),
+ ('invtextauto','invtextauto'),
+ ('invtextmode','invtextmode'),
+ ('invtf','invtf'),
+ ('invtgst','invtgst'),
+ ('invtildeop','invtildeop'),
+ ('invtimeout','invtimeout'),
+ ('invtitle','invtitle'),
+ ('invto','invto'),
+ ('invtop','invtop'),
+ ('invtr','invtr'),
+ ('invttimeout','invttimeout'),
+ ('invttybuiltin','invttybuiltin'),
+ ('invttyfast','invttyfast'),
+ ('invtx','invtx'),
+ ('invudf','invudf'),
+ ('invundofile','invundofile'),
+ ('invvb','invvb'),
+ ('invvisualbell','invvisualbell'),
+ ('invwa','invwa'),
+ ('invwarn','invwarn'),
+ ('invwb','invwb'),
+ ('invweirdinvert','invweirdinvert'),
+ ('invwfh','invwfh'),
+ ('invwfw','invwfw'),
+ ('invwic','invwic'),
+ ('invwildignorecase','invwildignorecase'),
+ ('invwildmenu','invwildmenu'),
+ ('invwinfixheight','invwinfixheight'),
+ ('invwinfixwidth','invwinfixwidth'),
+ ('invwiv','invwiv'),
+ ('invwmnu','invwmnu'),
+ ('invwrap','invwrap'),
+ ('invwrapscan','invwrapscan'),
+ ('invwrite','invwrite'),
+ ('invwriteany','invwriteany'),
+ ('invwritebackup','invwritebackup'),
+ ('invws','invws'),
+ ('is','is'),
+ ('isf','isf'),
+ ('isfname','isfname'),
+ ('isi','isi'),
+ ('isident','isident'),
+ ('isk','isk'),
+ ('iskeyword','iskeyword'),
+ ('isp','isp'),
+ ('isprint','isprint'),
+ ('joinspaces','joinspaces'),
+ ('js','js'),
+ ('key','key'),
+ ('keymap','keymap'),
+ ('keymodel','keymodel'),
+ ('keywordprg','keywordprg'),
+ ('km','km'),
+ ('kmp','kmp'),
+ ('kp','kp'),
+ ('langmap','langmap'),
+ ('langmenu','langmenu'),
+ ('laststatus','laststatus'),
+ ('lazyredraw','lazyredraw'),
+ ('lbr','lbr'),
+ ('lcs','lcs'),
+ ('linebreak','linebreak'),
+ ('lines','lines'),
+ ('linespace','linespace'),
+ ('lisp','lisp'),
+ ('lispwords','lispwords'),
+ ('list','list'),
+ ('listchars','listchars'),
+ ('lm','lm'),
+ ('lmap','lmap'),
+ ('loadplugins','loadplugins'),
+ ('lpl','lpl'),
+ ('ls','ls'),
+ ('lsp','lsp'),
+ ('lw','lw'),
+ ('lz','lz'),
+ ('ma','ma'),
+ ('macatsui','macatsui'),
+ ('magic','magic'),
+ ('makeef','makeef'),
+ ('makeprg','makeprg'),
+ ('mat','mat'),
+ ('matchpairs','matchpairs'),
+ ('matchtime','matchtime'),
+ ('maxcombine','maxcombine'),
+ ('maxfuncdepth','maxfuncdepth'),
+ ('maxmapdepth','maxmapdepth'),
+ ('maxmem','maxmem'),
+ ('maxmempattern','maxmempattern'),
+ ('maxmemtot','maxmemtot'),
+ ('mco','mco'),
+ ('mef','mef'),
+ ('menuitems','menuitems'),
+ ('mfd','mfd'),
+ ('mh','mh'),
+ ('mis','mis'),
+ ('mkspellmem','mkspellmem'),
+ ('ml','ml'),
+ ('mls','mls'),
+ ('mm','mm'),
+ ('mmd','mmd'),
+ ('mmp','mmp'),
+ ('mmt','mmt'),
+ ('mod','mod'),
+ ('modeline','modeline'),
+ ('modelines','modelines'),
+ ('modifiable','modifiable'),
+ ('modified','modified'),
+ ('more','more'),
+ ('mouse','mouse'),
+ ('mousef','mousef'),
+ ('mousefocus','mousefocus'),
+ ('mousehide','mousehide'),
+ ('mousem','mousem'),
+ ('mousemodel','mousemodel'),
+ ('mouses','mouses'),
+ ('mouseshape','mouseshape'),
+ ('mouset','mouset'),
+ ('mousetime','mousetime'),
+ ('mp','mp'),
+ ('mps','mps'),
+ ('msm','msm'),
+ ('mzq','mzq'),
+ ('mzquantum','mzquantum'),
+ ('nf','nf'),
+ ('nnoremap','nnoremap'),
+ ('noacd','noacd'),
+ ('noai','noai'),
+ ('noakm','noakm'),
+ ('noallowrevins','noallowrevins'),
+ ('noaltkeymap','noaltkeymap'),
+ ('noanti','noanti'),
+ ('noantialias','noantialias'),
+ ('noar','noar'),
+ ('noarab','noarab'),
+ ('noarabic','noarabic'),
+ ('noarabicshape','noarabicshape'),
+ ('noari','noari'),
+ ('noarshape','noarshape'),
+ ('noautochdir','noautochdir'),
+ ('noautoindent','noautoindent'),
+ ('noautoread','noautoread'),
+ ('noautowrite','noautowrite'),
+ ('noautowriteall','noautowriteall'),
+ ('noaw','noaw'),
+ ('noawa','noawa'),
+ ('nobackup','nobackup'),
+ ('noballooneval','noballooneval'),
+ ('nobeval','nobeval'),
+ ('nobin','nobin'),
+ ('nobinary','nobinary'),
+ ('nobiosk','nobiosk'),
+ ('nobioskey','nobioskey'),
+ ('nobk','nobk'),
+ ('nobl','nobl'),
+ ('nobomb','nobomb'),
+ ('nobuflisted','nobuflisted'),
+ ('nocf','nocf'),
+ ('noci','noci'),
+ ('nocin','nocin'),
+ ('nocindent','nocindent'),
+ ('nocompatible','nocompatible'),
+ ('noconfirm','noconfirm'),
+ ('noconsk','noconsk'),
+ ('noconskey','noconskey'),
+ ('nocopyindent','nocopyindent'),
+ ('nocp','nocp'),
+ ('nocrb','nocrb'),
+ ('nocscoperelative','nocscoperelative'),
+ ('nocscopetag','nocscopetag'),
+ ('nocscopeverbose','nocscopeverbose'),
+ ('nocsre','nocsre'),
+ ('nocst','nocst'),
+ ('nocsverb','nocsverb'),
+ ('nocuc','nocuc'),
+ ('nocul','nocul'),
+ ('nocursorbind','nocursorbind'),
+ ('nocursorcolumn','nocursorcolumn'),
+ ('nocursorline','nocursorline'),
+ ('nodeco','nodeco'),
+ ('nodelcombine','nodelcombine'),
+ ('nodg','nodg'),
+ ('nodiff','nodiff'),
+ ('nodigraph','nodigraph'),
+ ('noea','noea'),
+ ('noeb','noeb'),
+ ('noed','noed'),
+ ('noedcompatible','noedcompatible'),
+ ('noek','noek'),
+ ('noendofline','noendofline'),
+ ('noeol','noeol'),
+ ('noequalalways','noequalalways'),
+ ('noerrorbells','noerrorbells'),
+ ('noesckeys','noesckeys'),
+ ('noet','noet'),
+ ('noex','noex'),
+ ('noexpandtab','noexpandtab'),
+ ('noexrc','noexrc'),
+ ('nofen','nofen'),
+ ('nofic','nofic'),
+ ('nofileignorecase','nofileignorecase'),
+ ('nofk','nofk'),
+ ('nofkmap','nofkmap'),
+ ('nofoldenable','nofoldenable'),
+ ('nogd','nogd'),
+ ('nogdefault','nogdefault'),
+ ('noguipty','noguipty'),
+ ('nohid','nohid'),
+ ('nohidden','nohidden'),
+ ('nohk','nohk'),
+ ('nohkmap','nohkmap'),
+ ('nohkmapp','nohkmapp'),
+ ('nohkp','nohkp'),
+ ('nohls','nohls'),
+ ('nohlsearch','nohlsearch'),
+ ('noic','noic'),
+ ('noicon','noicon'),
+ ('noignorecase','noignorecase'),
+ ('noim','noim'),
+ ('noimc','noimc'),
+ ('noimcmdline','noimcmdline'),
+ ('noimd','noimd'),
+ ('noimdisable','noimdisable'),
+ ('noincsearch','noincsearch'),
+ ('noinf','noinf'),
+ ('noinfercase','noinfercase'),
+ ('noinsertmode','noinsertmode'),
+ ('nois','nois'),
+ ('nojoinspaces','nojoinspaces'),
+ ('nojs','nojs'),
+ ('nolazyredraw','nolazyredraw'),
+ ('nolbr','nolbr'),
+ ('nolinebreak','nolinebreak'),
+ ('nolisp','nolisp'),
+ ('nolist','nolist'),
+ ('noloadplugins','noloadplugins'),
+ ('nolpl','nolpl'),
+ ('nolz','nolz'),
+ ('noma','noma'),
+ ('nomacatsui','nomacatsui'),
+ ('nomagic','nomagic'),
+ ('nomh','nomh'),
+ ('noml','noml'),
+ ('nomod','nomod'),
+ ('nomodeline','nomodeline'),
+ ('nomodifiable','nomodifiable'),
+ ('nomodified','nomodified'),
+ ('nomore','nomore'),
+ ('nomousef','nomousef'),
+ ('nomousefocus','nomousefocus'),
+ ('nomousehide','nomousehide'),
+ ('nonu','nonu'),
+ ('nonumber','nonumber'),
+ ('noodev','noodev'),
+ ('noopendevice','noopendevice'),
+ ('nopaste','nopaste'),
+ ('nopi','nopi'),
+ ('nopreserveindent','nopreserveindent'),
+ ('nopreviewwindow','nopreviewwindow'),
+ ('noprompt','noprompt'),
+ ('nopvw','nopvw'),
+ ('noreadonly','noreadonly'),
+ ('norelativenumber','norelativenumber'),
+ ('noremap','noremap'),
+ ('norestorescreen','norestorescreen'),
+ ('norevins','norevins'),
+ ('nori','nori'),
+ ('norightleft','norightleft'),
+ ('norl','norl'),
+ ('nornu','nornu'),
+ ('noro','noro'),
+ ('nors','nors'),
+ ('noru','noru'),
+ ('noruler','noruler'),
+ ('nosb','nosb'),
+ ('nosc','nosc'),
+ ('noscb','noscb'),
+ ('noscrollbind','noscrollbind'),
+ ('noscs','noscs'),
+ ('nosecure','nosecure'),
+ ('nosft','nosft'),
+ ('noshellslash','noshellslash'),
+ ('noshelltemp','noshelltemp'),
+ ('noshiftround','noshiftround'),
+ ('noshortname','noshortname'),
+ ('noshowcmd','noshowcmd'),
+ ('noshowfulltag','noshowfulltag'),
+ ('noshowmatch','noshowmatch'),
+ ('noshowmode','noshowmode'),
+ ('nosi','nosi'),
+ ('nosm','nosm'),
+ ('nosmartcase','nosmartcase'),
+ ('nosmartindent','nosmartindent'),
+ ('nosmarttab','nosmarttab'),
+ ('nosmd','nosmd'),
+ ('nosn','nosn'),
+ ('nosol','nosol'),
+ ('nospell','nospell'),
+ ('nosplitbelow','nosplitbelow'),
+ ('nosplitright','nosplitright'),
+ ('nospr','nospr'),
+ ('nosr','nosr'),
+ ('nossl','nossl'),
+ ('nosta','nosta'),
+ ('nostartofline','nostartofline'),
+ ('nostmp','nostmp'),
+ ('noswapfile','noswapfile'),
+ ('noswf','noswf'),
+ ('nota','nota'),
+ ('notagbsearch','notagbsearch'),
+ ('notagrelative','notagrelative'),
+ ('notagstack','notagstack'),
+ ('notbi','notbi'),
+ ('notbidi','notbidi'),
+ ('notbs','notbs'),
+ ('notermbidi','notermbidi'),
+ ('noterse','noterse'),
+ ('notextauto','notextauto'),
+ ('notextmode','notextmode'),
+ ('notf','notf'),
+ ('notgst','notgst'),
+ ('notildeop','notildeop'),
+ ('notimeout','notimeout'),
+ ('notitle','notitle'),
+ ('noto','noto'),
+ ('notop','notop'),
+ ('notr','notr'),
+ ('nottimeout','nottimeout'),
+ ('nottybuiltin','nottybuiltin'),
+ ('nottyfast','nottyfast'),
+ ('notx','notx'),
+ ('noudf','noudf'),
+ ('noundofile','noundofile'),
+ ('novb','novb'),
+ ('novisualbell','novisualbell'),
+ ('nowa','nowa'),
+ ('nowarn','nowarn'),
+ ('nowb','nowb'),
+ ('noweirdinvert','noweirdinvert'),
+ ('nowfh','nowfh'),
+ ('nowfw','nowfw'),
+ ('nowic','nowic'),
+ ('nowildignorecase','nowildignorecase'),
+ ('nowildmenu','nowildmenu'),
+ ('nowinfixheight','nowinfixheight'),
+ ('nowinfixwidth','nowinfixwidth'),
+ ('nowiv','nowiv'),
+ ('nowmnu','nowmnu'),
+ ('nowrap','nowrap'),
+ ('nowrapscan','nowrapscan'),
+ ('nowrite','nowrite'),
+ ('nowriteany','nowriteany'),
+ ('nowritebackup','nowritebackup'),
+ ('nows','nows'),
+ ('nrformats','nrformats'),
+ ('nu','nu'),
+ ('number','number'),
+ ('numberwidth','numberwidth'),
+ ('nuw','nuw'),
+ ('odev','odev'),
+ ('oft','oft'),
+ ('ofu','ofu'),
+ ('omnifunc','omnifunc'),
+ ('opendevice','opendevice'),
+ ('operatorfunc','operatorfunc'),
+ ('opfunc','opfunc'),
+ ('osfiletype','osfiletype'),
+ ('pa','pa'),
+ ('para','para'),
+ ('paragraphs','paragraphs'),
+ ('paste','paste'),
+ ('pastetoggle','pastetoggle'),
+ ('patchexpr','patchexpr'),
+ ('patchmode','patchmode'),
+ ('path','path'),
+ ('pdev','pdev'),
+ ('penc','penc'),
+ ('pex','pex'),
+ ('pexpr','pexpr'),
+ ('pfn','pfn'),
+ ('ph','ph'),
+ ('pheader','pheader'),
+ ('pi','pi'),
+ ('pm','pm'),
+ ('pmbcs','pmbcs'),
+ ('pmbfn','pmbfn'),
+ ('popt','popt'),
+ ('preserveindent','preserveindent'),
+ ('previewheight','previewheight'),
+ ('previewwindow','previewwindow'),
+ ('printdevice','printdevice'),
+ ('printencoding','printencoding'),
+ ('printexpr','printexpr'),
+ ('printfont','printfont'),
+ ('printheader','printheader'),
+ ('printmbcharset','printmbcharset'),
+ ('printmbfont','printmbfont'),
+ ('printoptions','printoptions'),
+ ('prompt','prompt'),
+ ('pt','pt'),
+ ('pumheight','pumheight'),
+ ('pvh','pvh'),
+ ('pvw','pvw'),
+ ('qe','qe'),
+ ('quoteescape','quoteescape'),
+ ('rdt','rdt'),
+ ('re','re'),
+ ('readonly','readonly'),
+ ('redrawtime','redrawtime'),
+ ('regexpengine','regexpengine'),
+ ('relativenumber','relativenumber'),
+ ('remap','remap'),
+ ('report','report'),
+ ('restorescreen','restorescreen'),
+ ('revins','revins'),
+ ('ri','ri'),
+ ('rightleft','rightleft'),
+ ('rightleftcmd','rightleftcmd'),
+ ('rl','rl'),
+ ('rlc','rlc'),
+ ('rnu','rnu'),
+ ('ro','ro'),
+ ('rs','rs'),
+ ('rtp','rtp'),
+ ('ru','ru'),
+ ('ruf','ruf'),
+ ('ruler','ruler'),
+ ('rulerformat','rulerformat'),
+ ('runtimepath','runtimepath'),
+ ('sb','sb'),
+ ('sbo','sbo'),
+ ('sbr','sbr'),
+ ('sc','sc'),
+ ('scb','scb'),
+ ('scr','scr'),
+ ('scroll','scroll'),
+ ('scrollbind','scrollbind'),
+ ('scrolljump','scrolljump'),
+ ('scrolloff','scrolloff'),
+ ('scrollopt','scrollopt'),
+ ('scs','scs'),
+ ('sect','sect'),
+ ('sections','sections'),
+ ('secure','secure'),
+ ('sel','sel'),
+ ('selection','selection'),
+ ('selectmode','selectmode'),
+ ('sessionoptions','sessionoptions'),
+ ('sft','sft'),
+ ('sh','sh'),
+ ('shcf','shcf'),
+ ('shell','shell'),
+ ('shellcmdflag','shellcmdflag'),
+ ('shellpipe','shellpipe'),
+ ('shellquote','shellquote'),
+ ('shellredir','shellredir'),
+ ('shellslash','shellslash'),
+ ('shelltemp','shelltemp'),
+ ('shelltype','shelltype'),
+ ('shellxescape','shellxescape'),
+ ('shellxquote','shellxquote'),
+ ('shiftround','shiftround'),
+ ('shiftwidth','shiftwidth'),
+ ('shm','shm'),
+ ('shortmess','shortmess'),
+ ('shortname','shortname'),
+ ('showbreak','showbreak'),
+ ('showcmd','showcmd'),
+ ('showfulltag','showfulltag'),
+ ('showmatch','showmatch'),
+ ('showmode','showmode'),
+ ('showtabline','showtabline'),
+ ('shq','shq'),
+ ('si','si'),
+ ('sidescroll','sidescroll'),
+ ('sidescrolloff','sidescrolloff'),
+ ('siso','siso'),
+ ('sj','sj'),
+ ('slm','slm'),
+ ('sm','sm'),
+ ('smartcase','smartcase'),
+ ('smartindent','smartindent'),
+ ('smarttab','smarttab'),
+ ('smc','smc'),
+ ('smd','smd'),
+ ('sn','sn'),
+ ('so','so'),
+ ('softtabstop','softtabstop'),
+ ('sol','sol'),
+ ('sp','sp'),
+ ('spc','spc'),
+ ('spell','spell'),
+ ('spellcapcheck','spellcapcheck'),
+ ('spellfile','spellfile'),
+ ('spelllang','spelllang'),
+ ('spellsuggest','spellsuggest'),
+ ('spf','spf'),
+ ('spl','spl'),
+ ('splitbelow','splitbelow'),
+ ('splitright','splitright'),
+ ('spr','spr'),
+ ('sps','sps'),
+ ('sr','sr'),
+ ('srr','srr'),
+ ('ss','ss'),
+ ('ssl','ssl'),
+ ('ssop','ssop'),
+ ('st','st'),
+ ('sta','sta'),
+ ('stal','stal'),
+ ('startofline','startofline'),
+ ('statusline','statusline'),
+ ('stl','stl'),
+ ('stmp','stmp'),
+ ('sts','sts'),
+ ('su','su'),
+ ('sua','sua'),
+ ('suffixes','suffixes'),
+ ('suffixesadd','suffixesadd'),
+ ('sw','sw'),
+ ('swapfile','swapfile'),
+ ('swapsync','swapsync'),
+ ('swb','swb'),
+ ('swf','swf'),
+ ('switchbuf','switchbuf'),
+ ('sws','sws'),
+ ('sxe','sxe'),
+ ('sxq','sxq'),
+ ('syn','syn'),
+ ('synmaxcol','synmaxcol'),
+ ('syntax','syntax'),
+ ('t_AB','t_AB'),
+ ('t_AF','t_AF'),
+ ('t_AL','t_AL'),
+ ('t_CS','t_CS'),
+ ('t_CV','t_CV'),
+ ('t_Ce','t_Ce'),
+ ('t_Co','t_Co'),
+ ('t_Cs','t_Cs'),
+ ('t_DL','t_DL'),
+ ('t_EI','t_EI'),
+ ('t_F1','t_F1'),
+ ('t_F2','t_F2'),
+ ('t_F3','t_F3'),
+ ('t_F4','t_F4'),
+ ('t_F5','t_F5'),
+ ('t_F6','t_F6'),
+ ('t_F7','t_F7'),
+ ('t_F8','t_F8'),
+ ('t_F9','t_F9'),
+ ('t_IE','t_IE'),
+ ('t_IS','t_IS'),
+ ('t_K1','t_K1'),
+ ('t_K3','t_K3'),
+ ('t_K4','t_K4'),
+ ('t_K5','t_K5'),
+ ('t_K6','t_K6'),
+ ('t_K7','t_K7'),
+ ('t_K8','t_K8'),
+ ('t_K9','t_K9'),
+ ('t_KA','t_KA'),
+ ('t_KB','t_KB'),
+ ('t_KC','t_KC'),
+ ('t_KD','t_KD'),
+ ('t_KE','t_KE'),
+ ('t_KF','t_KF'),
+ ('t_KG','t_KG'),
+ ('t_KH','t_KH'),
+ ('t_KI','t_KI'),
+ ('t_KJ','t_KJ'),
+ ('t_KK','t_KK'),
+ ('t_KL','t_KL'),
+ ('t_RI','t_RI'),
+ ('t_RV','t_RV'),
+ ('t_SI','t_SI'),
+ ('t_Sb','t_Sb'),
+ ('t_Sf','t_Sf'),
+ ('t_WP','t_WP'),
+ ('t_WS','t_WS'),
+ ('t_ZH','t_ZH'),
+ ('t_ZR','t_ZR'),
+ ('t_al','t_al'),
+ ('t_bc','t_bc'),
+ ('t_cd','t_cd'),
+ ('t_ce','t_ce'),
+ ('t_cl','t_cl'),
+ ('t_cm','t_cm'),
+ ('t_cs','t_cs'),
+ ('t_da','t_da'),
+ ('t_db','t_db'),
+ ('t_dl','t_dl'),
+ ('t_fs','t_fs'),
+ ('t_k1','t_k1'),
+ ('t_k2','t_k2'),
+ ('t_k3','t_k3'),
+ ('t_k4','t_k4'),
+ ('t_k5','t_k5'),
+ ('t_k6','t_k6'),
+ ('t_k7','t_k7'),
+ ('t_k8','t_k8'),
+ ('t_k9','t_k9'),
+ ('t_kB','t_kB'),
+ ('t_kD','t_kD'),
+ ('t_kI','t_kI'),
+ ('t_kN','t_kN'),
+ ('t_kP','t_kP'),
+ ('t_kb','t_kb'),
+ ('t_kd','t_kd'),
+ ('t_ke','t_ke'),
+ ('t_kh','t_kh'),
+ ('t_kl','t_kl'),
+ ('t_kr','t_kr'),
+ ('t_ks','t_ks'),
+ ('t_ku','t_ku'),
+ ('t_le','t_le'),
+ ('t_mb','t_mb'),
+ ('t_md','t_md'),
+ ('t_me','t_me'),
+ ('t_mr','t_mr'),
+ ('t_ms','t_ms'),
+ ('t_nd','t_nd'),
+ ('t_op','t_op'),
+ ('t_se','t_se'),
+ ('t_so','t_so'),
+ ('t_sr','t_sr'),
+ ('t_te','t_te'),
+ ('t_ti','t_ti'),
+ ('t_ts','t_ts'),
+ ('t_u7','t_u7'),
+ ('t_ue','t_ue'),
+ ('t_us','t_us'),
+ ('t_ut','t_ut'),
+ ('t_vb','t_vb'),
+ ('t_ve','t_ve'),
+ ('t_vi','t_vi'),
+ ('t_vs','t_vs'),
+ ('t_xs','t_xs'),
+ ('ta','ta'),
+ ('tabline','tabline'),
+ ('tabpagemax','tabpagemax'),
+ ('tabstop','tabstop'),
+ ('tag','tag'),
+ ('tagbsearch','tagbsearch'),
+ ('taglength','taglength'),
+ ('tagrelative','tagrelative'),
+ ('tags','tags'),
+ ('tagstack','tagstack'),
+ ('tal','tal'),
+ ('tb','tb'),
+ ('tbi','tbi'),
+ ('tbidi','tbidi'),
+ ('tbis','tbis'),
+ ('tbs','tbs'),
+ ('tenc','tenc'),
+ ('term','term'),
+ ('termbidi','termbidi'),
+ ('termencoding','termencoding'),
+ ('terse','terse'),
+ ('textauto','textauto'),
+ ('textmode','textmode'),
+ ('textwidth','textwidth'),
+ ('tf','tf'),
+ ('tgst','tgst'),
+ ('thesaurus','thesaurus'),
+ ('tildeop','tildeop'),
+ ('timeout','timeout'),
+ ('timeoutlen','timeoutlen'),
+ ('title','title'),
+ ('titlelen','titlelen'),
+ ('titleold','titleold'),
+ ('titlestring','titlestring'),
+ ('tl','tl'),
+ ('tm','tm'),
+ ('to','to'),
+ ('toolbar','toolbar'),
+ ('toolbariconsize','toolbariconsize'),
+ ('top','top'),
+ ('tpm','tpm'),
+ ('tr','tr'),
+ ('ts','ts'),
+ ('tsl','tsl'),
+ ('tsr','tsr'),
+ ('ttimeout','ttimeout'),
+ ('ttimeoutlen','ttimeoutlen'),
+ ('ttm','ttm'),
+ ('tty','tty'),
+ ('ttybuiltin','ttybuiltin'),
+ ('ttyfast','ttyfast'),
+ ('ttym','ttym'),
+ ('ttymouse','ttymouse'),
+ ('ttyscroll','ttyscroll'),
+ ('ttytype','ttytype'),
+ ('tw','tw'),
+ ('tx','tx'),
+ ('uc','uc'),
+ ('udf','udf'),
+ ('udir','udir'),
+ ('ul','ul'),
+ ('undodir','undodir'),
+ ('undofile','undofile'),
+ ('undolevels','undolevels'),
+ ('undoreload','undoreload'),
+ ('updatecount','updatecount'),
+ ('updatetime','updatetime'),
+ ('ur','ur'),
+ ('ut','ut'),
+ ('vb','vb'),
+ ('vbs','vbs'),
+ ('vdir','vdir'),
+ ('ve','ve'),
+ ('verbose','verbose'),
+ ('verbosefile','verbosefile'),
+ ('vfile','vfile'),
+ ('vi','vi'),
+ ('viewdir','viewdir'),
+ ('viewoptions','viewoptions'),
+ ('viminfo','viminfo'),
+ ('virtualedit','virtualedit'),
+ ('visualbell','visualbell'),
+ ('vnoremap','vnoremap'),
+ ('vop','vop'),
+ ('wa','wa'),
+ ('wak','wak'),
+ ('warn','warn'),
+ ('wb','wb'),
+ ('wc','wc'),
+ ('wcm','wcm'),
+ ('wd','wd'),
+ ('weirdinvert','weirdinvert'),
+ ('wfh','wfh'),
+ ('wfw','wfw'),
+ ('wh','wh'),
+ ('whichwrap','whichwrap'),
+ ('wi','wi'),
+ ('wic','wic'),
+ ('wig','wig'),
+ ('wildchar','wildchar'),
+ ('wildcharm','wildcharm'),
+ ('wildignore','wildignore'),
+ ('wildignorecase','wildignorecase'),
+ ('wildmenu','wildmenu'),
+ ('wildmode','wildmode'),
+ ('wildoptions','wildoptions'),
+ ('wim','wim'),
+ ('winaltkeys','winaltkeys'),
+ ('window','window'),
+ ('winfixheight','winfixheight'),
+ ('winfixwidth','winfixwidth'),
+ ('winheight','winheight'),
+ ('winminheight','winminheight'),
+ ('winminwidth','winminwidth'),
+ ('winwidth','winwidth'),
+ ('wiv','wiv'),
+ ('wiw','wiw'),
+ ('wm','wm'),
+ ('wmh','wmh'),
+ ('wmnu','wmnu'),
+ ('wmw','wmw'),
+ ('wop','wop'),
+ ('wrap','wrap'),
+ ('wrapmargin','wrapmargin'),
+ ('wrapscan','wrapscan'),
+ ('write','write'),
+ ('writeany','writeany'),
+ ('writebackup','writebackup'),
+ ('writedelay','writedelay'),
+ ('ws','ws'),
+ ('ww','ww'),
+ )
+ return var
+option = _getoption()
+
diff --git a/pygments/lexers/actionscript.py b/pygments/lexers/actionscript.py
new file mode 100644
index 0000000..3a5bbf2
--- /dev/null
+++ b/pygments/lexers/actionscript.py
@@ -0,0 +1,245 @@
+"""
+ pygments.lexers.actionscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for ActionScript and MXML.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, using, this, words, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
+
+
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript (1/2) source code.

    Recognizes keywords, declaration keywords, the builtin classes of the
    Flash player API and the global builtin functions.

    .. versionadded:: 0.9
    """

    name = 'ActionScript'
    aliases = ['actionscript', 'as']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']

    # DOTALL lets the /* ... */ comment rule span multiple lines.
    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # Regex literal with optional flags, e.g. /foo/gim.
            (r'/(\\\\|\\[^\\]|[^/\\\n])*/[gim]*', String.Regex),
            (r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (words((
                'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
                'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
                'var', 'with', 'new', 'typeof', 'arguments', 'instanceof',
                'this', 'switch'), suffix=r'\b'),
             Keyword),
            (words((
                'class', 'public', 'final', 'internal', 'native', 'override',
                'private', 'protected', 'static', 'import', 'extends',
                'implements', 'interface', 'intrinsic', 'return', 'super',
                'dynamic', 'function', 'const', 'get', 'namespace', 'package',
                'set'), suffix=r'\b'),
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Builtin classes of the Flash player API.
            # NOTE(review): 'ConvultionFilter', 'DisplacmentMapFilter(Mode)'
            # and 'URLVariabeles' look misspelled, but are kept byte-for-byte
            # to preserve the existing highlighting behavior.
            (words((
                'Accessibility', 'AccessibilityProperties',
                'ActionScriptVersion', 'ActivityEvent', 'AntiAliasType',
                'ApplicationDomain', 'AsBroadcaster', 'Array',
                'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap',
                'BitmapData', 'BitmapDataChannel', 'BitmapFilter',
                'BitmapFilterQuality', 'BitmapFilterType', 'BlendMode',
                'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities',
                'CapsStyle', 'Class', 'Color', 'ColorMatrixFilter',
                'ColorTransform', 'ContextMenu', 'ContextMenuBuiltInItems',
                'ContextMenuEvent', 'ContextMenuItem', 'ConvultionFilter',
                'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
                'DeleteObjectSample', 'Dictionary', 'DisplacmentMapFilter',
                'DisplayObject', 'DisplacmentMapFilterMode',
                'DisplayObjectContainer', 'DropShadowFilter', 'Endian',
                'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event',
                'EventDispatcher', 'EventPhase', 'ExternalInterface',
                'FileFilter', 'FileReference', 'FileReferenceList',
                'FocusDirection', 'FocusEvent', 'Font', 'FontStyle',
                'FontType', 'FrameLabel', 'FullScreenEvent', 'Function',
                'GlowFilter', 'GradientBevelFilter', 'GradientGlowFilter',
                'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
                'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput',
                # FIX: a missing comma here used to fuse the next two names
                # into 'IDynamicPropertyOutputIDynamicPropertyWriter' through
                # implicit string concatenation, so neither was highlighted.
                'IDynamicPropertyOutput', 'IDynamicPropertyWriter',
                'IEventDispatcher', 'IExternalizable', 'IllegalOperationError',
                'IME', 'IMEConversionMode', 'IMEEvent', 'int',
                'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError',
                'InvokeEvent', 'IOError', 'IOErrorEvent', 'JointStyle', 'Key',
                'Keyboard', 'KeyboardEvent', 'KeyLocation', 'LineScaleMode',
                'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars',
                'LocalConnection', 'Locale', 'Math', 'Matrix', 'MemoryError',
                'Microphone', 'MorphShape', 'Mouse', 'MouseEvent', 'MovieClip',
                'MovieClipLoader', 'Namespace', 'NetConnection',
                'NetStatusEvent', 'NetStream', 'NewObjectSample', 'Number',
                'Object', 'ObjectEncoding', 'PixelSnapping', 'Point',
                'PrintJob', 'PrintJobOptions', 'PrintJobOrientation',
                'ProgressEvent', 'Proxy', 'QName', 'RangeError', 'Rectangle',
                'ReferenceError', 'RegExp', 'Responder', 'Sample', 'Scene',
                'ScriptTimeoutError', 'Security', 'SecurityDomain',
                'SecurityError', 'SecurityErrorEvent', 'SecurityPanel',
                'Selection', 'Shape', 'SharedObject',
                'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound',
                'SoundChannel', 'SoundLoaderContext', 'SoundMixer',
                'SoundTransform', 'SpreadMethod', 'Sprite', 'StackFrame',
                'StackOverflowError', 'Stage', 'StageAlign',
                'StageDisplayState', 'StageQuality', 'StageScaleMode',
                'StaticText', 'StatusEvent', 'String', 'StyleSheet',
                'SWFVersion', 'SyncEvent', 'SyntaxError', 'System',
                'TextColorType', 'TextField', 'TextFieldAutoSize',
                'TextFieldType', 'TextFormat', 'TextFormatAlign',
                'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer',
                'TimerEvent', 'Transform', 'TypeError', 'uint', 'URIError',
                'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
                'URLRequestHeader', 'URLRequestMethod', 'URLStream',
                'URLVariabeles', 'VerifyError', 'Video', 'XML', 'XMLDocument',
                'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
                'XMLUI'), suffix=r'\b'),
             Name.Builtin),
            # Global builtin functions.
            (words((
                'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape',
                'eval', 'isFinite', 'isNaN', 'isXMLName', 'clearInterval',
                'fscommand', 'getTimer', 'getURL', 'getVersion', 'parseFloat',
                'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
                'unescape'), suffix=r'\b'),
             Name.Function),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]
    }

    def analyse_text(text):
        """This is only used to disambiguate between ActionScript and
        ActionScript3. We return 0 here; the ActionScript3 lexer will match
        AS3 variable definitions and that will hopefully suffice."""
        return 0
+
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    .. versionadded:: 0.11
    """

    name = 'ActionScript 3'
    url = 'https://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/index.html'
    aliases = ['actionscript3', 'as3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']

    # Reusable fragments: a plain identifier, and a type name that may carry
    # a Vector.<T>-style parameterization.
    identifier = r'[$a-zA-Z_]\w*'
    typeidentifier = identifier + r'(?:\.<\w+>)?'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # Function declaration; parameters are handled in 'funcparams'.
            # FIX: the whitespace group was tokenized as Text; use Whitespace
            # for consistency with every other whitespace rule in this file.
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Whitespace, Operator),
             'funcparams'),
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Whitespace, Name, Whitespace,
                      Punctuation, Whitespace, Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\[^\\]|[^\\\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # Formal parameter list: name, optional '...' rest marker, and a
        # ':'-separated type (or '*' for untyped).
        'funcparams': [
            (r'\s+', Whitespace),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Whitespace, Punctuation, Name, Whitespace, Operator,
                      Whitespace, Keyword.Type, Whitespace), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # Return-type annotation after the closing paren; always pops back
        # past 'funcparams' to 'root'.
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Whitespace, Operator, Whitespace, Keyword.Type), '#pop:2'),
            # FIX: was Text; use Whitespace for consistency with the rest of
            # the lexer's whitespace rules.
            (r'\s+', Whitespace, '#pop:2'),
            default('#pop:2')
        ],
        # Default value of a parameter, delegated back to this lexer.
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Whitespace, using(this), Whitespace, Operator),
             '#pop'),
            (r',', Operator, '#pop'),
            default('#pop')
        ]
    }

    def analyse_text(text):
        # An AS3 'name : Type' annotation is a weak but useful signal.
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
+
+
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.

    .. versionadded:: 1.1
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # FIX: attribute was misspelled 'mimetimes', so this list was dead and
    # the MIME types were never registered with the lexer machinery. Note
    # that these generic XML types are also claimed by the XML lexer.
    mimetypes = ['text/xml', 'application/xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections carry the embedded ActionScript 3 code.
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Whitespace),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            (r'\s+', Whitespace),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
diff --git a/pygments/lexers/ada.py b/pygments/lexers/ada.py
new file mode 100644
index 0000000..34fe989
--- /dev/null
+++ b/pygments/lexers/ada.py
@@ -0,0 +1,144 @@
+"""
+ pygments.lexers.ada
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Ada family languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words, using, this, \
+ default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+from pygments.lexers._ada_builtins import KEYWORD_LIST, BUILTIN_LIST
+
+__all__ = ['AdaLexer']
+
+
class AdaLexer(RegexLexer):
    """
    For Ada source code.

    .. versionadded:: 1.3
    """

    name = 'Ada'
    aliases = ['ada', 'ada95', 'ada2005']
    filenames = ['*.adb', '*.ads', '*.ada']
    mimetypes = ['text/x-ada']

    # Ada is case-insensitive; MULTILINE for the line-oriented rules.
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            # FIX: this horizontal-whitespace rule appeared twice in a row;
            # the second copy was unreachable and has been removed.
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
            (r'(subtype|type)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            (r'task|protected', Keyword.Declaration),
            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
            (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
                                             Comment.Preproc)),
            (r'(true|false|null)\b', Keyword.Constant),
            # builtin types
            (words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type),
            # FIX: 'or' previously required a following 'else' to match this
            # rule; made the '\s+else' group optional to mirror
            # 'and(\s+then)?' so a bare 'or' is also an Operator.Word.
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
            (r'generic|private', Keyword.Declaration),
            (r'package', Keyword.Declaration, 'package'),
            (r'array\b', Keyword.Reserved, 'array_def'),
            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
            (r'(\w+)(\s*)(:)(\s*)(constant)',
             bygroups(Name.Constant, Text, Punctuation, Text,
                      Keyword.Reserved)),
            (r'<<\w+>>', Name.Label),
            (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
            # keywords
            (words(KEYWORD_LIST, prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            (r'"[^"]*"', String),
            include('attribute'),
            include('numbers'),
            (r"'[^']'", String.Character),
            (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"(<>|=>|:=|@|[\[\]]|[()|:;,.'])", Punctuation),
            (r'[*<>+=/&-]', Operator),
            (r'\n+', Text),
        ],
        'numbers': [
            # based literals like 16#FF#, then real and integer literals
            (r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex),
            (r'[0-9_]+\.[0-9_]*', Number.Float),
            (r'[0-9_]+', Number.Integer),
        ],
        'attribute': [
            # 'Length, 'First, etc.
            (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
        ],
        'subprogram': [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'is\b', Keyword.Reserved, '#pop'),
            (r'"[^"]+"|\w+', Name.Function),
            include('root'),
        ],
        'end': [
            ('(if|case|record|loop|select)', Keyword.Reserved),
            (r'"[^"]+"|[\w.]+', Name.Function),
            (r'\s+', Text),
            (';', Punctuation, '#pop'),
        ],
        'type_def': [
            (r';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'formal_part'),
            (r'\[', Punctuation, 'formal_part'),
            (r'with|and|use', Keyword.Reserved),
            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
            # (parentheses around the state name were a no-op; removed)
            (r'record\b', Keyword.Reserved, 'record_def'),
            (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation),
             '#pop'),
            include('root'),
        ],
        'array_def': [
            (r';', Punctuation, '#pop'),
            (r'(\w+)(\s+)(range)',
             bygroups(Keyword.Type, Text, Keyword.Reserved)),
            include('root'),
        ],
        'record_def': [
            (r'end record', Keyword.Reserved, '#pop'),
            include('root'),
        ],
        'import': [
            # TODO: use Name.Namespace if appropriate. This needs
            # work to distinguish imports from aspects.
            (r'[\w.]+', Name, '#pop'),
            default('#pop'),
        ],
        'formal_part': [
            (r'\)', Punctuation, '#pop'),
            (r'\]', Punctuation, '#pop'),
            (r'\w+', Name.Variable),
            (r',|:[^=]', Punctuation),
            (r'(in|not|null|out|access)\b', Keyword.Reserved),
            include('root'),
        ],
        'package': [
            ('body', Keyword.Declaration),
            (r'is\s+new|renames', Keyword.Reserved),
            ('is', Keyword.Reserved, '#pop'),
            (';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'package_instantiation'),
            (r'([\w.]+)', Name.Class),
            include('root'),
        ],
        'package_instantiation': [
            (r'("[^"]+"|\w+)(\s+)(=>)',
             bygroups(Name.Variable, Text, Punctuation)),
            (r'[\w.\'"]', Text),
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
    }
diff --git a/pygments/lexers/agile.py b/pygments/lexers/agile.py
new file mode 100644
index 0000000..7c903a6
--- /dev/null
+++ b/pygments/lexers/agile.py
@@ -0,0 +1,23 @@
+"""
+ pygments.lexers.agile
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Just export lexer classes previously contained in this module.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.lisp import SchemeLexer
+from pygments.lexers.jvm import IokeLexer, ClojureLexer
+from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
+ PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
+from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
+from pygments.lexers.perl import PerlLexer, Perl6Lexer
+from pygments.lexers.d import CrocLexer, MiniDLexer
+from pygments.lexers.iolang import IoLexer
+from pygments.lexers.tcl import TclLexer
+from pygments.lexers.factor import FactorLexer
+from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
+
+__all__ = []
diff --git a/pygments/lexers/algebra.py b/pygments/lexers/algebra.py
new file mode 100644
index 0000000..8c97ecc
--- /dev/null
+++ b/pygments/lexers/algebra.py
@@ -0,0 +1,302 @@
+"""
+ pygments.lexers.algebra
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for computer algebra systems.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+__all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer',
+ 'BCLexer']
+
+
+class GAPLexer(RegexLexer):
+ """
+ For GAP source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'GAP'
+ url = 'http://www.gap-system.org'
+ aliases = ['gap']
+ filenames = ['*.g', '*.gd', '*.gi', '*.gap']
+
+ tokens = {
+ 'root': [
+ (r'#.*$', Comment.Single),
+ (r'"(?:[^"\\]|\\.)*"', String),
+ (r'\(|\)|\[|\]|\{|\}', Punctuation),
+ (r'''(?x)\b(?:
+ if|then|elif|else|fi|
+ for|while|do|od|
+ repeat|until|
+ break|continue|
+ function|local|return|end|
+ rec|
+ quit|QUIT|
+ IsBound|Unbind|
+ TryNextMethod|
+ Info|Assert
+ )\b''', Keyword),
+ (r'''(?x)\b(?:
+ true|false|fail|infinity
+ )\b''',
+ Name.Constant),
+ (r'''(?x)\b(?:
+ (Declare|Install)([A-Z][A-Za-z]+)|
+ BindGlobal|BIND_GLOBAL
+ )\b''',
+ Name.Builtin),
+ (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
+ (r'''(?x)\b(?:
+ and|or|not|mod|in
+ )\b''',
+ Operator.Word),
+ (r'''(?x)
+ (?:\w+|`[^`]*`)
+ (?:::\w+|`[^`]*`)*''', Name.Variable),
+ (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
+ (r'\.[0-9]+(?:e[0-9]+)?', Number),
+ (r'.', Text)
+ ],
+ }
+
+ def analyse_text(text):
+ score = 0.0
+
+ # Declaration part
+ if re.search(
+ r"(InstallTrueMethod|Declare(Attribute|Category|Filter|Operation" +
+ r"|GlobalFunction|Synonym|SynonymAttr|Property))", text
+ ):
+ score += 0.7
+
+ # Implementation part
+ if re.search(
+ r"(DeclareRepresentation|Install(GlobalFunction|Method|" +
+ r"ImmediateMethod|OtherMethod)|New(Family|Type)|Objectify)", text
+ ):
+ score += 0.7
+
+ return min(score, 1.0)
+
+
+class GAPConsoleLexer(Lexer):
+ """
+ For GAP console sessions. Modeled after JuliaConsoleLexer.
+
+ .. versionadded:: 2.14
+ """
+ name = 'GAP session'
+ aliases = ['gap-console', 'gap-repl']
+ filenames = ['*.tst']
+
+ def get_tokens_unprocessed(self, text):
+ gaplexer = GAPLexer(**self.options)
+ start = 0
+ curcode = ''
+ insertions = []
+ output = False
+ error = False
+
+ for line in text.splitlines(keepends=True):
+ if line.startswith('gap> ') or line.startswith('brk> '):
+ insertions.append((len(curcode), [(0, Generic.Prompt, line[:5])]))
+ curcode += line[5:]
+ output = False
+ error = False
+ elif not output and line.startswith('> '):
+ insertions.append((len(curcode), [(0, Generic.Prompt, line[:2])]))
+ curcode += line[2:]
+ else:
+ if curcode:
+ yield from do_insertions(
+ insertions, gaplexer.get_tokens_unprocessed(curcode))
+ curcode = ''
+ insertions = []
+ if line.startswith('Error, ') or error:
+ yield start, Generic.Error, line
+ error = True
+ else:
+ yield start, Generic.Output, line
+ output = True
+ start += len(line)
+
+ if curcode:
+ yield from do_insertions(
+ insertions, gaplexer.get_tokens_unprocessed(curcode))
+
+ # the following is needed to distinguish Scilab and GAP .tst files
+ def analyse_text(text):
+        # GAP prompts are a dead giveaway, although hypothetically a
+ # file in another language could be trying to compare a variable
+ # "gap" as in "gap> 0.1". But that this should happen at the
+ # start of a line seems unlikely...
+ if re.search(r"^gap> ", text):
+ return 0.9
+ else:
+ return 0.0
+
+
+class MathematicaLexer(RegexLexer):
+ """
+ Lexer for Mathematica source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Mathematica'
+ url = 'http://www.wolfram.com/mathematica/'
+ aliases = ['mathematica', 'mma', 'nb']
+ filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
+ mimetypes = ['application/mathematica',
+ 'application/vnd.wolfram.mathematica',
+ 'application/vnd.wolfram.mathematica.package',
+ 'application/vnd.wolfram.cdf']
+
+ # http://reference.wolfram.com/mathematica/guide/Syntax.html
+ operators = (
+        ";;", "=", "=.", "!=", "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
+ "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
+ "@@@", "~~", "===", "&", "<", ">", "<=", ">=",
+ )
+
+ punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
+
+ def _multi_escape(entries):
+ return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))
+
+ tokens = {
+ 'root': [
+ (r'(?s)\(\*.*?\*\)', Comment),
+
+ (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
+ (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
+ (r'#\d*', Name.Variable),
+ (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
+
+ (r'-?\d+\.\d*', Number.Float),
+ (r'-?\d*\.\d+', Number.Float),
+ (r'-?\d+', Number.Integer),
+
+ (words(operators), Operator),
+ (words(punctuation), Punctuation),
+ (r'".*?"', String),
+ (r'\s+', Text.Whitespace),
+ ],
+ }
+
+
+class MuPADLexer(RegexLexer):
+ """
+ A MuPAD lexer.
+ Contributed by Christopher Creutzig <christopher@creutzig.de>.
+
+ .. versionadded:: 0.8
+ """
+ name = 'MuPAD'
+ url = 'http://www.mupad.com'
+ aliases = ['mupad']
+ filenames = ['*.mu']
+
+ tokens = {
+ 'root': [
+ (r'//.*?$', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'"(?:[^"\\]|\\.)*"', String),
+ (r'\(|\)|\[|\]|\{|\}', Punctuation),
+ (r'''(?x)\b(?:
+ next|break|end|
+ axiom|end_axiom|category|end_category|domain|end_domain|inherits|
+ if|%if|then|elif|else|end_if|
+ case|of|do|otherwise|end_case|
+ while|end_while|
+ repeat|until|end_repeat|
+ for|from|to|downto|step|end_for|
+ proc|local|option|save|begin|end_proc|
+ delete|frame
+ )\b''', Keyword),
+ (r'''(?x)\b(?:
+ DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
+ DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
+ DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
+ DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
+ )\b''', Name.Class),
+ (r'''(?x)\b(?:
+ PI|EULER|E|CATALAN|
+ NIL|FAIL|undefined|infinity|
+ TRUE|FALSE|UNKNOWN
+ )\b''',
+ Name.Constant),
+ (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
+ (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
+ (r'''(?x)\b(?:
+ and|or|not|xor|
+ assuming|
+ div|mod|
+ union|minus|intersect|in|subset
+ )\b''',
+ Operator.Word),
+ (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
+ # (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
+ (r'''(?x)
+ ((?:[a-zA-Z_#][\w#]*|`[^`]*`)
+ (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
+ bygroups(Name.Function, Text, Punctuation)),
+ (r'''(?x)
+ (?:[a-zA-Z_#][\w#]*|`[^`]*`)
+ (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
+ (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
+ (r'\.[0-9]+(?:e[0-9]+)?', Number),
+ (r'\s+', Whitespace),
+ (r'.', Text)
+ ],
+ 'comment': [
+ (r'[^/*]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
+ }
+
+
+class BCLexer(RegexLexer):
+ """
+ A BC lexer.
+
+ .. versionadded:: 2.1
+ """
+ name = 'BC'
+ url = 'https://www.gnu.org/software/bc/'
+ aliases = ['bc']
+ filenames = ['*.bc']
+
+ tokens = {
+ 'root': [
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'"(?:[^"\\]|\\.)*"', String),
+ (r'[{}();,]', Punctuation),
+ (words(('if', 'else', 'while', 'for', 'break', 'continue',
+ 'halt', 'return', 'define', 'auto', 'print', 'read',
+ 'length', 'scale', 'sqrt', 'limits', 'quit',
+ 'warranty'), suffix=r'\b'), Keyword),
+ (r'\+\+|--|\|\||&&|'
+ r'([-<>+*%\^/!=])=?', Operator),
+ # bc doesn't support exponential
+ (r'[0-9]+(\.[0-9]*)?', Number),
+ (r'\.[0-9]+', Number),
+ (r'.', Text)
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
+ }
diff --git a/pygments/lexers/ambient.py b/pygments/lexers/ambient.py
new file mode 100644
index 0000000..abe6126
--- /dev/null
+++ b/pygments/lexers/ambient.py
@@ -0,0 +1,76 @@
+"""
+ pygments.lexers.ambient
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for AmbientTalk language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words, bygroups
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['AmbientTalkLexer']
+
+
+class AmbientTalkLexer(RegexLexer):
+ """
+ Lexer for AmbientTalk source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'AmbientTalk'
+ url = 'https://code.google.com/p/ambienttalk'
+ filenames = ['*.at']
+ aliases = ['ambienttalk', 'ambienttalk/2', 'at']
+ mimetypes = ['text/x-ambienttalk']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
+ 'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
+ 'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
+ 'mirroredBy:', 'is:'))
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'(def|deftype|import|alias|exclude)\b', Keyword),
+ (builtin, Name.Builtin),
+ (r'(true|false|nil)\b', Keyword.Constant),
+ (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+ (r'\|', Punctuation, 'arglist'),
+ (r'<:|[*^!%&<>+=,./?-]|:=', Operator),
+ (r"`[a-zA-Z_]\w*", String.Symbol),
+ (r"[a-zA-Z_]\w*:", Name.Function),
+ (r"[{}()\[\];`]", Punctuation),
+ (r'(self|super)\b', Name.Variable.Instance),
+ (r"[a-zA-Z_]\w*", Name.Variable),
+ (r"@[a-zA-Z_]\w*", Name.Class),
+ (r"@\[", Name.Class, 'annotations'),
+ include('numbers'),
+ ],
+ 'numbers': [
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+ (r'\d+', Number.Integer)
+ ],
+ 'namespace': [
+ (r'[a-zA-Z_]\w*\.', Name.Namespace),
+ (r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
+ (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
+ ],
+ 'annotations': [
+ (r"(.*?)\]", Name.Class, '#pop')
+ ],
+ 'arglist': [
+ (r'\|', Punctuation, '#pop'),
+ (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
+ (r'[a-zA-Z_]\w*', Name.Variable),
+ ],
+ }
diff --git a/pygments/lexers/amdgpu.py b/pygments/lexers/amdgpu.py
new file mode 100644
index 0000000..5356b22
--- /dev/null
+++ b/pygments/lexers/amdgpu.py
@@ -0,0 +1,53 @@
+"""
+ pygments.lexers.amdgpu
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the AMDGPU ISA assembly.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Name, Text, Keyword, Whitespace, Number, Comment
+
+import re
+
+__all__ = ['AMDGPULexer']
+
+
+class AMDGPULexer(RegexLexer):
+ """
+ For AMD GPU assembly.
+
+ .. versionadded:: 2.8
+ """
+ name = 'AMDGPU'
+ aliases = ['amdgpu']
+ filenames = ['*.isa']
+
+ flags = re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'[\r\n]+', Text),
+ (r'(([a-z_0-9])*:([a-z_0-9])*)', Name.Attribute),
+ (r'(\[|\]|\(|\)|,|\:|\&)', Text),
+ (r'([;#]|//).*?\n', Comment.Single),
+ (r'((s_)?(ds|buffer|flat|image)_[a-z0-9_]+)', Keyword.Reserved),
+ (r'(_lo|_hi)', Name.Variable),
+ (r'(vmcnt|lgkmcnt|expcnt)', Name.Attribute),
+ (words((
+ 'op', 'vaddr', 'vdata', 'soffset', 'srsrc', 'format',
+ 'offset', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe', 'lds',
+ 'lit', 'unorm'), suffix=r'\b'), Name.Attribute),
+ (r'(label_[a-z0-9]+)', Keyword),
+ (r'(_L[0-9]*)', Name.Variable),
+ (r'(s|v)_[a-z0-9_]+', Keyword),
+ (r'(v[0-9.]+|vcc|exec|v)', Name.Variable),
+ (r's[0-9.]+|s', Name.Variable),
+ (r'[0-9]+\.[^0-9]+', Number.Float),
+ (r'(0[xX][a-z0-9]+)|([0-9]+)', Number.Integer)
+ ]
+ }
diff --git a/pygments/lexers/ampl.py b/pygments/lexers/ampl.py
new file mode 100644
index 0000000..a3cf605
--- /dev/null
+++ b/pygments/lexers/ampl.py
@@ -0,0 +1,88 @@
+"""
+ pygments.lexers.ampl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the AMPL language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, using, this, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['AmplLexer']
+
+
+class AmplLexer(RegexLexer):
+ """
+ For AMPL source code.
+
+ .. versionadded:: 2.2
+ """
+ name = 'Ampl'
+ url = 'http://ampl.com/'
+ aliases = ['ampl']
+ filenames = ['*.run']
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'\s+', Whitespace),
+ (r'#.*?\n', Comment.Single),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (words((
+ 'call', 'cd', 'close', 'commands', 'data', 'delete', 'display',
+ 'drop', 'end', 'environ', 'exit', 'expand', 'include', 'load',
+ 'model', 'objective', 'option', 'problem', 'purge', 'quit',
+ 'redeclare', 'reload', 'remove', 'reset', 'restore', 'shell',
+ 'show', 'solexpand', 'solution', 'solve', 'update', 'unload',
+ 'xref', 'coeff', 'coef', 'cover', 'obj', 'interval', 'default',
+ 'from', 'to', 'to_come', 'net_in', 'net_out', 'dimen',
+ 'dimension', 'check', 'complements', 'write', 'function',
+ 'pipe', 'format', 'if', 'then', 'else', 'in', 'while', 'repeat',
+ 'for'), suffix=r'\b'), Keyword.Reserved),
+ (r'(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)',
+ Keyword.Type),
+ (r'\".*?\"', String.Double),
+ (r'\'.*?\'', String.Single),
+ (r'[()\[\]{},;:]+', Punctuation),
+ (r'\b(\w+)(\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|'
+ r'lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|'
+ r'ub|urc|uslack|val)',
+ bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
+ (r'(set|param|var|arc|minimize|maximize|subject to|s\.t\.|subj to|'
+ r'node|table|suffix|read table|write table)(\s+)(\w+)',
+ bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
+ (r'(param)(\s*)(:)(\s*)(\w+)(\s*)(:)(\s*)((\w|\s)+)',
+ bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace,
+ Name.Variable, Whitespace, Punctuation, Whitespace, Name.Variable)),
+ (r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)',
+ bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace,
+ Name.Variable)),
+ (words((
+ 'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2',
+ 'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10',
+ 'max', 'min', 'precision', 'round', 'sin', 'sinh', 'sqrt', 'tan',
+ 'tanh', 'time', 'trunc', 'Beta', 'Cauchy', 'Exponential', 'Gamma',
+ 'Irand224', 'Normal', 'Normal01', 'Poisson', 'Uniform', 'Uniform01',
+ 'num', 'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
+ 'match', 'sub', 'gsub', 'print', 'printf', 'next', 'nextw', 'prev',
+ 'prevw', 'first', 'last', 'ord', 'ord0', 'card', 'arity',
+ 'indexarity'), prefix=r'\b', suffix=r'\b'), Name.Builtin),
+ (r'(\+|\-|\*|/|\*\*|=|<=|>=|==|\||\^|<|>|\!|\.\.|:=|\&|\!=|<<|>>)',
+ Operator),
+ (words((
+ 'or', 'exists', 'forall', 'and', 'in', 'not', 'within', 'union',
+ 'diff', 'difference', 'symdiff', 'inter', 'intersect',
+ 'intersection', 'cross', 'setof', 'by', 'less', 'sum', 'prod',
+ 'product', 'div', 'mod'), suffix=r'\b'),
+ Keyword.Reserved), # Operator.Name but not enough emphasized with that
+ (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
+ (r'\d+([eE][+-]?\d+)?', Number.Integer),
+ (r'[+-]?Infinity', Number.Integer),
+ (r'(\w+|(\.(?!\.)))', Text)
+ ]
+
+ }
diff --git a/pygments/lexers/apdlexer.py b/pygments/lexers/apdlexer.py
new file mode 100644
index 0000000..74ef751
--- /dev/null
+++ b/pygments/lexers/apdlexer.py
@@ -0,0 +1,447 @@
+"""
+ pygments.lexers.apdlexer
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for ANSYS Parametric Design Language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ String, Generic, Punctuation, Whitespace
+
+__all__ = ['apdlexer']
+
+
+class apdlexer(RegexLexer):
+ """
+ For APDL source code.
+
+ .. versionadded:: 2.9
+ """
+ name = 'ANSYS parametric design language'
+ aliases = ['ansys', 'apdl']
+ filenames = ['*.ans']
+ flags = re.IGNORECASE
+
+ # list of elements
+ elafunb = ("SURF152", "SURF153", "SURF154", "SURF156", "SHELL157",
+ "SURF159", "LINK160", "BEAM161", "PLANE162",
+ "SHELL163", "SOLID164", "COMBI165", "MASS166",
+ "LINK167", "SOLID168", "TARGE169", "TARGE170",
+ "CONTA171", "CONTA172", "CONTA173", "CONTA174",
+ "CONTA175", "CONTA176", "CONTA177", "CONTA178",
+ "PRETS179", "LINK180", "SHELL181", "PLANE182",
+ "PLANE183", "MPC184", "SOLID185", "SOLID186",
+ "SOLID187", "BEAM188", "BEAM189", "SOLSH190",
+ "INTER192", "INTER193", "INTER194", "INTER195",
+ "MESH200", "FOLLW201", "INTER202", "INTER203",
+ "INTER204", "INTER205", "SHELL208", "SHELL209",
+ "CPT212", "CPT213", "COMBI214", "CPT215", "CPT216",
+ "CPT217", "FLUID220", "FLUID221", "PLANE223",
+ "SOLID226", "SOLID227", "PLANE230", "SOLID231",
+ "SOLID232", "PLANE233", "SOLID236", "SOLID237",
+ "PLANE238", "SOLID239", "SOLID240", "HSFLD241",
+ "HSFLD242", "SURF251", "SURF252", "REINF263",
+ "REINF264", "REINF265", "SOLID272", "SOLID273",
+ "SOLID278", "SOLID279", "SHELL281", "SOLID285",
+ "PIPE288", "PIPE289", "ELBOW290", "USER300", "BEAM3",
+ "BEAM4", "BEAM23", "BEAM24", "BEAM44", "BEAM54",
+ "COMBIN7", "FLUID79", "FLUID80", "FLUID81", "FLUID141",
+ "FLUID142", "INFIN9", "INFIN47", "PLANE13", "PLANE25",
+ "PLANE42", "PLANE53", "PLANE67", "PLANE82", "PLANE83",
+ "PLANE145", "PLANE146", "CONTAC12", "CONTAC52",
+ "LINK1", "LINK8", "LINK10", "LINK32", "PIPE16",
+ "PIPE17", "PIPE18", "PIPE20", "PIPE59", "PIPE60",
+ "SHELL41", "SHELL43", "SHELL57", "SHELL63", "SHELL91",
+ "SHELL93", "SHELL99", "SHELL150", "SOLID5", "SOLID45",
+ "SOLID46", "SOLID65", "SOLID69", "SOLID92", "SOLID95",
+ "SOLID117", "SOLID127", "SOLID128", "SOLID147",
+ "SOLID148", "SOLID191", "VISCO88", "VISCO89",
+ "VISCO106", "VISCO107", "VISCO108", "TRANS109")
+
+ elafunc = ("PGRAPH", "/VT", "VTIN", "VTRFIL", "VTTEMP", "PGRSET",
+ "VTCLR", "VTMETH", "VTRSLT", "VTVMOD", "PGSELE",
+ "VTDISC", "VTMP", "VTSEC", "PGWRITE", "VTEVAL", "VTOP",
+ "VTSFE", "POUTRES", "VTFREQ", "VTPOST", "VTSL",
+ "FLDATA1-40", "HFPCSWP", "MSDATA", "MSVARY", "QFACT",
+ "FLOCHECK", "HFPOWER", "MSMASS", "PERI", "SPADP",
+ "FLREAD", "HFPORT", "MSMETH", "PLFSS", "SPARM",
+ "FLOTRAN", "HFSCAT", "MSMIR", "PLSCH", "SPFSS",
+ "HFADP", "ICE", "MSNOMF", "PLSYZ", "SPICE", "HFARRAY",
+ "ICEDELE", "MSPROP", "PLTD", "SPSCAN", "HFDEEM",
+ "ICELIST", "MSQUAD", "PLTLINE", "SPSWP", "HFEIGOPT",
+ "ICVFRC", "MSRELAX", "PLVFRC", "HFEREFINE", "LPRT",
+ "MSSOLU", "/PICE", "HFMODPRT", "MSADV", "MSSPEC",
+ "PLWAVE", "HFPA", "MSCAP", "MSTERM", "PRSYZ")
+
+ elafund = ("*VOPER", "VOVLAP", "*VPLOT", "VPLOT", "VPTN", "*VPUT",
+ "VPUT", "*VREAD", "VROTAT", "VSBA", "VSBV", "VSBW",
+ "/VSCALE", "*VSCFUN", "VSEL", "VSLA", "*VSTAT", "VSUM",
+ "VSWEEP", "VSYMM", "VTRAN", "VTYPE", "/VUP", "*VWRITE",
+ "/WAIT", "WAVES", "WERASE", "WFRONT", "/WINDOW",
+ "WMID", "WMORE", "WPAVE", "WPCSYS", "WPLANE", "WPOFFS",
+ "WPROTA", "WPSTYL", "WRFULL", "WRITE", "WRITEMAP",
+ "*WRK", "WSORT", "WSPRINGS", "WSTART", "WTBCREATE",
+ "XFDATA", "XFENRICH", "XFLIST", "/XFRM", "/XRANGE",
+ "XVAR", "/YRANGE", "/ZOOM", "/WB", "XMLO", "/XML",
+ "CNTR", "EBLOCK", "CMBLOCK", "NBLOCK", "/TRACK",
+ "CWZPLOT", "~EUI", "NELE", "EALL", "NALL", "FLITEM",
+ "LSLN", "PSOLVE", "ASLN", "/VERIFY", "/SSS", "~CFIN",
+ "*EVAL", "*MOONEY", "/RUNSTAT", "ALPFILL",
+ "ARCOLLAPSE", "ARDETACH", "ARFILL", "ARMERGE",
+ "ARSPLIT", "FIPLOT", "GAPFINISH", "GAPLIST",
+ "GAPMERGE", "GAPOPT", "GAPPLOT", "LNCOLLAPSE",
+ "LNDETACH", "LNFILL", "LNMERGE", "LNSPLIT", "PCONV",
+ "PLCONV", "PEMOPTS", "PEXCLUDE", "PINCLUDE", "PMETH",
+ "/PMETH", "PMOPTS", "PPLOT", "PPRANGE", "PRCONV",
+ "PRECISION", "RALL", "RFILSZ", "RITER", "RMEMRY",
+ "RSPEED", "RSTAT", "RTIMST", "/RUNST", "RWFRNT",
+ "SARPLOT", "SHSD", "SLPPLOT", "SLSPLOT", "VCVFILL",
+ "/OPT", "OPEQN", "OPFACT", "OPFRST", "OPGRAD",
+ "OPKEEP", "OPLOOP", "OPPRNT", "OPRAND", "OPSUBP",
+ "OPSWEEP", "OPTYPE", "OPUSER", "OPVAR", "OPADD",
+ "OPCLR", "OPDEL", "OPMAKE", "OPSEL", "OPANL", "OPDATA",
+ "OPRESU", "OPSAVE", "OPEXE", "OPLFA", "OPLGR",
+ "OPLIST", "OPLSW", "OPRFA", "OPRGR", "OPRSW",
+ "PILECALC", "PILEDISPSET", "PILEGEN", "PILELOAD",
+ "PILEMASS", "PILERUN", "PILESEL", "PILESTIF",
+ "PLVAROPT", "PRVAROPT", "TOCOMP", "TODEF", "TOFREQ",
+ "TOTYPE", "TOVAR", "TOEXE", "TOLOOP", "TOGRAPH",
+ "TOLIST", "TOPLOT", "TOPRINT", "TOSTAT", "TZAMESH",
+ "TZDELE", "TZEGEN", "XVAROPT", "PGSAVE", "SOLCONTROL",
+ "TOTAL", "VTGEOM", "VTREAL", "VTSTAT")
+
+ elafune = ("/ANUM", "AOFFST", "AOVLAP", "APLOT", "APPEND", "APTN",
+ "ARCLEN", "ARCTRM", "AREAS", "AREFINE", "AREMESH",
+ "AREVERSE", "AROTAT", "ARSCALE", "ARSYM", "ASBA",
+ "ASBL", "ASBV", "ASBW", "ASCRES", "ASEL", "ASIFILE",
+ "*ASK", "ASKIN", "ASLL", "ASLV", "ASOL", "/ASSIGN",
+ "ASUB", "ASUM", "ATAN", "ATRAN", "ATYPE", "/AUTO",
+ "AUTOTS", "/AUX2", "/AUX3", "/AUX12", "/AUX15",
+ "AVPRIN", "AVRES", "AWAVE", "/AXLAB", "*AXPY",
+ "/BATCH", "BCSOPTION", "BETAD", "BF", "BFA", "BFADELE",
+ "BFALIST", "BFCUM", "BFDELE", "BFE", "BFECUM",
+ "BFEDELE", "BFELIST", "BFESCAL", "BFINT", "BFK",
+ "BFKDELE", "BFKLIST", "BFL", "BFLDELE", "BFLIST",
+ "BFLLIST", "BFSCALE", "BFTRAN", "BFUNIF", "BFV",
+ "BFVDELE", "BFVLIST", "BIOOPT", "BIOT", "BLC4", "BLC5",
+ "BLOCK", "BOOL", "BOPTN", "BSAX", "BSMD", "BSM1",
+ "BSM2", "BSPLIN", "BSS1", "BSS2", "BSTE", "BSTQ",
+ "BTOL", "BUCOPT", "C", "CALC", "CAMPBELL", "CBDOF",
+ "CBMD", "CBMX", "CBTE", "CBTMP", "CDOPT", "CDREAD",
+ "CDWRITE", "CE", "CECHECK", "CECMOD", "CECYC",
+ "CEDELE", "CEINTF", "CELIST", "CENTER", "CEQN",
+ "CERIG", "CESGEN", "CFACT", "*CFCLOS", "*CFOPEN",
+ "*CFWRITE", "/CFORMAT", "CGLOC", "CGOMGA", "CGROW",
+ "CHECK", "CHKMSH", "CINT", "CIRCLE", "CISOL",
+ "/CLABEL", "/CLEAR", "CLOCAL", "CLOG", "/CLOG",
+ "CLRMSHLN", "CM", "CMACEL", "/CMAP", "CMATRIX",
+ "CMDELE", "CMDOMEGA", "CMEDIT", "CMGRP", "CMLIST",
+ "CMMOD", "CMOMEGA", "CMPLOT", "CMROTATE", "CMSEL",
+ "CMSFILE", "CMSOPT", "CMWRITE", "CNCHECK", "CNKMOD",
+ "CNTR", "CNVTOL", "/COLOR", "/COM", "*COMP", "COMBINE",
+ "COMPRESS", "CON4", "CONE", "/CONFIG", "CONJUG",
+ "/CONTOUR", "/COPY", "CORIOLIS", "COUPLE", "COVAL",
+ "CP", "CPCYC", "CPDELE", "CPINTF", "/CPLANE", "CPLGEN",
+ "CPLIST", "CPMERGE", "CPNGEN", "CPSGEN", "CQC",
+ "*CREATE", "CRPLIM", "CS", "CSCIR", "CSDELE", "CSKP",
+ "CSLIST", "CSWPLA", "CSYS", "/CTYPE", "CURR2D",
+ "CUTCONTROL", "/CVAL", "CVAR", "/CWD", "CYCCALC",
+ "/CYCEXPAND", "CYCFILES", "CYCFREQ", "*CYCLE",
+ "CYCLIC", "CYCOPT", "CYCPHASE", "CYCSPEC", "CYL4",
+ "CYL5", "CYLIND", "CZDEL", "CZMESH", "D", "DA",
+ "DADELE", "DALIST", "DAMORPH", "DATA", "DATADEF",
+ "DCGOMG", "DCUM", "DCVSWP", "DDASPEC", "DDELE",
+ "DDOPTION", "DEACT", "DEFINE", "*DEL", "DELETE",
+ "/DELETE", "DELTIM", "DEMORPH", "DERIV", "DESIZE",
+ "DESOL", "DETAB", "/DEVDISP", "/DEVICE", "/DFLAB",
+ "DFLX", "DFSWAVE", "DIG", "DIGIT", "*DIM",
+ "/DIRECTORY", "DISPLAY", "/DIST", "DJ", "DJDELE",
+ "DJLIST", "DK", "DKDELE", "DKLIST", "DL", "DLDELE",
+ "DLIST", "DLLIST", "*DMAT", "DMOVE", "DMPEXT",
+ "DMPOPTION", "DMPRAT", "DMPSTR", "DNSOL", "*DO", "DOF",
+ "DOFSEL", "DOMEGA", "*DOT", "*DOWHILE", "DSCALE",
+ "/DSCALE", "DSET", "DSPOPTION", "DSUM", "DSURF",
+ "DSYM", "DSYS", "DTRAN", "DUMP", "/DV3D", "DVAL",
+ "DVMORPH", "DYNOPT", "E", "EALIVE", "EDADAPT", "EDALE",
+ "EDASMP", "EDBOUND", "EDBX", "EDBVIS", "EDCADAPT",
+ "EDCGEN", "EDCLIST", "EDCMORE", "EDCNSTR", "EDCONTACT",
+ "EDCPU", "EDCRB", "EDCSC", "EDCTS", "EDCURVE",
+ "EDDAMP", "EDDBL", "EDDC", "EDDRELAX", "EDDUMP",
+ "EDELE", "EDENERGY", "EDFPLOT", "EDGCALE", "/EDGE",
+ "EDHGLS", "EDHIST", "EDHTIME", "EDINT", "EDIPART",
+ "EDIS", "EDLCS", "EDLOAD", "EDMP", "EDNB", "EDNDTSD",
+ "EDNROT", "EDOPT", "EDOUT", "EDPART", "EDPC", "EDPL",
+ "EDPVEL", "EDRC", "EDRD", "EDREAD", "EDRI", "EDRST",
+ "EDRUN", "EDSHELL", "EDSOLV", "EDSP", "EDSTART",
+ "EDTERM", "EDTP", "EDVEL", "EDWELD", "EDWRITE",
+ "EEXTRUDE", "/EFACET", "EGEN", "*EIGEN", "EINFIN",
+ "EINTF", "EKILL", "ELBOW", "ELEM", "ELIST", "*ELSE",
+ "*ELSEIF", "EMAGERR", "EMATWRITE", "EMF", "EMFT",
+ "EMID", "EMIS", "EMODIF", "EMORE", "EMSYM", "EMTGEN",
+ "EMUNIT", "EN", "*END", "*ENDDO", "*ENDIF",
+ "ENDRELEASE", "ENERSOL", "ENGEN", "ENORM", "ENSYM",
+ "EORIENT", "EPLOT", "EQSLV", "ERASE", "/ERASE",
+ "EREAD", "EREFINE", "EREINF", "ERESX", "ERNORM",
+ "ERRANG", "ESCHECK", "ESEL", "/ESHAPE", "ESIZE",
+ "ESLA", "ESLL", "ESLN", "ESLV", "ESOL", "ESORT",
+ "ESSOLV", "ESTIF", "ESURF", "ESYM", "ESYS", "ET",
+ "ETABLE", "ETCHG", "ETCONTROL", "ETDELE", "ETLIST",
+ "ETYPE", "EUSORT", "EWRITE", "*EXIT", "/EXIT", "EXP",
+ "EXPAND", "/EXPAND", "EXPASS", "*EXPORT", "EXPROFILE",
+ "EXPSOL", "EXTOPT", "EXTREM", "EXUNIT", "F", "/FACET",
+ "FATIGUE", "FC", "FCCHECK", "FCDELE", "FCLIST", "FCUM",
+ "FCTYP", "FDELE", "/FDELE", "FE", "FEBODY", "FECONS",
+ "FEFOR", "FELIST", "FESURF", "*FFT", "FILE",
+ "FILEAUX2", "FILEAUX3", "FILEDISP", "FILL", "FILLDATA",
+ "/FILNAME", "FINISH", "FITEM", "FJ", "FJDELE",
+ "FJLIST", "FK", "FKDELE", "FKLIST", "FL", "FLIST",
+ "FLLIST", "FLST", "FLUXV", "FLUREAD", "FMAGBC",
+ "FMAGSUM", "/FOCUS", "FOR2D", "FORCE", "FORM",
+ "/FORMAT", "FP", "FPLIST", "*FREE", "FREQ", "FRQSCL",
+ "FS", "FSCALE", "FSDELE", "FSLIST", "FSNODE", "FSPLOT",
+ "FSSECT", "FSSPARM", "FSUM", "FTCALC", "FTRAN",
+ "FTSIZE", "FTWRITE", "FTYPE", "FVMESH", "GAP", "GAPF",
+ "GAUGE", "GCDEF", "GCGEN", "/GCMD", "/GCOLUMN",
+ "GENOPT", "GEOM", "GEOMETRY", "*GET", "/GFILE",
+ "/GFORMAT", "/GLINE", "/GMARKER", "GMATRIX", "GMFACE",
+ "*GO", "/GO", "/GOLIST", "/GOPR", "GP", "GPDELE",
+ "GPLIST", "GPLOT", "/GRAPHICS", "/GRESUME", "/GRID",
+ "/GROPT", "GRP", "/GRTYP", "/GSAVE", "GSBDATA",
+ "GSGDATA", "GSLIST", "GSSOL", "/GST", "GSUM", "/GTHK",
+ "/GTYPE", "HARFRQ", "/HBC", "HBMAT", "/HEADER", "HELP",
+ "HELPDISP", "HEMIOPT", "HFANG", "HFSYM", "HMAGSOLV",
+ "HPGL", "HPTCREATE", "HPTDELETE", "HRCPLX", "HREXP",
+ "HROPT", "HROCEAN", "HROUT", "IC", "ICDELE", "ICLIST",
+ "/ICLWID", "/ICSCALE", "*IF", "IGESIN", "IGESOUT",
+ "/IMAGE", "IMAGIN", "IMESH", "IMMED", "IMPD",
+ "INISTATE", "*INIT", "/INPUT", "/INQUIRE", "INRES",
+ "INRTIA", "INT1", "INTSRF", "IOPTN", "IRLF", "IRLIST",
+ "*ITENGINE", "JPEG", "JSOL", "K", "KATT", "KBC",
+ "KBETW", "KCALC", "KCENTER", "KCLEAR", "KDELE",
+ "KDIST", "KEEP", "KESIZE", "KEYOPT", "KEYPTS", "KEYW",
+ "KFILL", "KGEN", "KL", "KLIST", "KMESH", "KMODIF",
+ "KMOVE", "KNODE", "KPLOT", "KPSCALE", "KREFINE",
+ "KSCALE", "KSCON", "KSEL", "KSLL", "KSLN", "KSUM",
+ "KSYMM", "KTRAN", "KUSE", "KWPAVE", "KWPLAN", "L",
+ "L2ANG", "L2TAN", "LANG", "LARC", "/LARC", "LAREA",
+ "LARGE", "LATT", "LAYER", "LAYERP26", "LAYLIST",
+ "LAYPLOT", "LCABS", "LCASE", "LCCALC", "LCCAT",
+ "LCDEF", "LCFACT", "LCFILE", "LCLEAR", "LCOMB",
+ "LCOPER", "LCSEL", "LCSL", "LCSUM", "LCWRITE",
+ "LCZERO", "LDELE", "LDIV", "LDRAG", "LDREAD", "LESIZE",
+ "LEXTND", "LFILLT", "LFSURF", "LGEN", "LGLUE",
+ "LGWRITE", "/LIGHT", "LINA", "LINE", "/LINE", "LINES",
+ "LINL", "LINP", "LINV", "LIST", "*LIST", "LLIST",
+ "LMATRIX", "LMESH", "LNSRCH", "LOCAL", "LOVLAP",
+ "LPLOT", "LPTN", "LREFINE", "LREVERSE", "LROTAT",
+ "LSBA", "*LSBAC", "LSBL", "LSBV", "LSBW", "LSCLEAR",
+ "LSDELE", "*LSDUMP", "LSEL", "*LSENGINE", "*LSFACTOR",
+ "LSLA", "LSLK", "LSOPER", "/LSPEC", "LSREAD",
+ "*LSRESTORE", "LSSCALE", "LSSOLVE", "LSTR", "LSUM",
+ "LSWRITE", "/LSYMBOL", "LSYMM", "LTAN", "LTRAN",
+ "LUMPM", "LVSCALE", "LWPLAN", "M", "MADAPT", "MAGOPT",
+ "MAGSOLV", "/MAIL", "MAP", "/MAP", "MAP2DTO3D",
+ "MAPSOLVE", "MAPVAR", "MASTER", "MAT", "MATER",
+ "MCHECK", "MDAMP", "MDELE", "MDPLOT", "MEMM", "/MENU",
+ "MESHING", "MFANALYSIS", "MFBUCKET", "MFCALC", "MFCI",
+ "MFCLEAR", "MFCMMAND", "MFCONV", "MFDTIME", "MFELEM",
+ "MFEM", "MFEXTER", "MFFNAME", "MFFR", "MFIMPORT",
+ "MFINTER", "MFITER", "MFLCOMM", "MFLIST", "MFMAP",
+ "MFORDER", "MFOUTPUT", "*MFOURI", "MFPSIMUL", "MFRC",
+ "MFRELAX", "MFRSTART", "MFSORDER", "MFSURFACE",
+ "MFTIME", "MFTOL", "*MFUN", "MFVOLUME", "MFWRITE",
+ "MGEN", "MIDTOL", "/MKDIR", "MLIST", "MMASS", "MMF",
+ "MODCONT", "MODE", "MODIFY", "MODMSH", "MODSELOPTION",
+ "MODOPT", "MONITOR", "*MOPER", "MOPT", "MORPH", "MOVE",
+ "MP", "MPAMOD", "MPCHG", "MPCOPY", "MPDATA", "MPDELE",
+ "MPDRES", "/MPLIB", "MPLIST", "MPPLOT", "MPREAD",
+ "MPRINT", "MPTEMP", "MPTGEN", "MPTRES", "MPWRITE",
+ "/MREP", "MSAVE", "*MSG", "MSHAPE", "MSHCOPY",
+ "MSHKEY", "MSHMID", "MSHPATTERN", "MSOLVE", "/MSTART",
+ "MSTOLE", "*MULT", "*MWRITE", "MXPAND", "N", "NANG",
+ "NAXIS", "NCNV", "NDELE", "NDIST", "NDSURF", "NEQIT",
+ "/NERR", "NFORCE", "NGEN", "NKPT", "NLADAPTIVE",
+ "NLDIAG", "NLDPOST", "NLGEOM", "NLHIST", "NLIST",
+ "NLMESH", "NLOG", "NLOPT", "NMODIF", "NOCOLOR",
+ "NODES", "/NOERASE", "/NOLIST", "NOOFFSET", "NOORDER",
+ "/NOPR", "NORA", "NORL", "/NORMAL", "NPLOT", "NPRINT",
+ "NREAD", "NREFINE", "NRLSUM", "*NRM", "NROPT",
+ "NROTAT", "NRRANG", "NSCALE", "NSEL", "NSLA", "NSLE",
+ "NSLK", "NSLL", "NSLV", "NSMOOTH", "NSOL", "NSORT",
+ "NSTORE", "NSUBST", "NSVR", "NSYM", "/NUMBER",
+ "NUMCMP", "NUMEXP", "NUMMRG", "NUMOFF", "NUMSTR",
+ "NUMVAR", "NUSORT", "NWPAVE", "NWPLAN", "NWRITE",
+ "OCDATA", "OCDELETE", "OCLIST", "OCREAD", "OCTABLE",
+ "OCTYPE", "OCZONE", "OMEGA", "OPERATE", "OPNCONTROL",
+ "OUTAERO", "OUTOPT", "OUTPR", "/OUTPUT", "OUTRES",
+ "OVCHECK", "PADELE", "/PAGE", "PAGET", "PAPUT",
+ "PARESU", "PARTSEL", "PARRES", "PARSAV", "PASAVE",
+ "PATH", "PAUSE", "/PBC", "/PBF", "PCALC", "PCGOPT",
+ "PCIRC", "/PCIRCLE", "/PCOPY", "PCROSS", "PDANL",
+ "PDCDF", "PDCFLD", "PDCLR", "PDCMAT", "PDCORR",
+ "PDDMCS", "PDDOEL", "PDEF", "PDEXE", "PDHIST",
+ "PDINQR", "PDLHS", "PDMETH", "PDOT", "PDPINV",
+ "PDPLOT", "PDPROB", "PDRESU", "PDROPT", "/PDS",
+ "PDSAVE", "PDSCAT", "PDSENS", "PDSHIS", "PDUSER",
+ "PDVAR", "PDWRITE", "PERBC2D", "PERTURB", "PFACT",
+ "PHYSICS", "PIVCHECK", "PLCAMP", "PLCFREQ", "PLCHIST",
+ "PLCINT", "PLCPLX", "PLCRACK", "PLDISP", "PLESOL",
+ "PLETAB", "PLFAR", "PLF2D", "PLGEOM", "PLLS", "PLMAP",
+ "PLMC", "PLNEAR", "PLNSOL", "/PLOPTS", "PLORB", "PLOT",
+ "PLOTTING", "PLPAGM", "PLPATH", "PLSECT", "PLST",
+ "PLTIME", "PLTRAC", "PLVAR", "PLVECT", "PLZZ",
+ "/PMACRO", "PMAP", "PMGTRAN", "PMLOPT", "PMLSIZE",
+ "/PMORE", "PNGR", "/PNUM", "POINT", "POLY", "/POLYGON",
+ "/POST1", "/POST26", "POWERH", "PPATH", "PRANGE",
+ "PRAS", "PRCAMP", "PRCINT", "PRCPLX", "PRED",
+ "PRENERGY", "/PREP7", "PRERR", "PRESOL", "PRETAB",
+ "PRFAR", "PRI2", "PRIM", "PRINT", "*PRINT", "PRISM",
+ "PRITER", "PRJSOL", "PRNEAR", "PRNLD", "PRNSOL",
+ "PROD", "PRORB", "PRPATH", "PRRFOR", "PRRSOL",
+ "PRSCONTROL", "PRSECT", "PRTIME", "PRVAR", "PRVECT",
+ "PSCONTROL", "PSCR", "PSDCOM", "PSDFRQ", "PSDGRAPH",
+ "PSDRES", "PSDSPL", "PSDUNIT", "PSDVAL", "PSDWAV",
+ "/PSEARCH", "PSEL", "/PSF", "PSMAT", "PSMESH",
+ "/PSPEC", "/PSTATUS", "PSTRES", "/PSYMB", "PTR",
+ "PTXY", "PVECT", "/PWEDGE", "QDVAL", "QRDOPT", "QSOPT",
+ "QUAD", "/QUIT", "QUOT", "R", "RACE", "RADOPT",
+ "RAPPND", "RATE", "/RATIO", "RBE3", "RCON", "RCYC",
+ "RDEC", "RDELE", "READ", "REAL", "REALVAR", "RECTNG",
+ "REMESH", "/RENAME", "REORDER", "*REPEAT", "/REPLOT",
+ "RESCOMBINE", "RESCONTROL", "RESET", "/RESET", "RESP",
+ "RESUME", "RESVEC", "RESWRITE", "*RETURN", "REXPORT",
+ "REZONE", "RFORCE", "/RGB", "RIGID", "RIGRESP",
+ "RIMPORT", "RLIST", "RMALIST", "RMANL", "RMASTER",
+ "RMCAP", "RMCLIST", "/RMDIR", "RMFLVEC", "RMLVSCALE",
+ "RMMLIST", "RMMRANGE", "RMMSELECT", "RMNDISP",
+ "RMNEVEC", "RMODIF", "RMORE", "RMPORDER", "RMRESUME",
+ "RMRGENERATE", "RMROPTIONS", "RMRPLOT", "RMRSTATUS",
+ "RMSAVE", "RMSMPLE", "RMUSE", "RMXPORT", "ROCK",
+ "ROSE", "RPOLY", "RPR4", "RPRISM", "RPSD", "RSFIT",
+ "RSOPT", "RSPLIT", "RSPLOT", "RSPRNT", "RSSIMS",
+ "RSTMAC", "RSTOFF", "RSURF", "RSYMM", "RSYS", "RTHICK",
+ "SABS", "SADD", "SALLOW", "SAVE", "SBCLIST", "SBCTRAN",
+ "SDELETE", "SE", "SECCONTROL", "SECDATA",
+ "SECFUNCTION", "SECJOINT", "/SECLIB", "SECLOCK",
+ "SECMODIF", "SECNUM", "SECOFFSET", "SECPLOT",
+ "SECREAD", "SECSTOP", "SECTYPE", "SECWRITE", "SED",
+ "SEDLIST", "SEEXP", "/SEG", "SEGEN", "SELIST", "SELM",
+ "SELTOL", "SENERGY", "SEOPT", "SESYMM", "*SET", "SET",
+ "SETFGAP", "SETRAN", "SEXP", "SF", "SFA", "SFACT",
+ "SFADELE", "SFALIST", "SFBEAM", "SFCALC", "SFCUM",
+ "SFDELE", "SFE", "SFEDELE", "SFELIST", "SFFUN",
+ "SFGRAD", "SFL", "SFLDELE", "SFLEX", "SFLIST",
+ "SFLLIST", "SFSCALE", "SFTRAN", "/SHADE", "SHELL",
+ "/SHOW", "/SHOWDISP", "SHPP", "/SHRINK", "SLIST",
+ "SLOAD", "SMALL", "*SMAT", "SMAX", "/SMBC", "SMBODY",
+ "SMCONS", "SMFOR", "SMIN", "SMOOTH", "SMRTSIZE",
+ "SMSURF", "SMULT", "SNOPTION", "SOLU", "/SOLU",
+ "SOLUOPT", "SOLVE", "SORT", "SOURCE", "SPACE",
+ "SPCNOD", "SPCTEMP", "SPDAMP", "SPEC", "SPFREQ",
+ "SPGRAPH", "SPH4", "SPH5", "SPHERE", "SPLINE", "SPLOT",
+ "SPMWRITE", "SPOINT", "SPOPT", "SPREAD", "SPTOPT",
+ "SPOWER", "SPUNIT", "SPVAL", "SQRT", "*SREAD", "SRSS",
+ "SSBT", "/SSCALE", "SSLN", "SSMT", "SSPA", "SSPB",
+ "SSPD", "SSPE", "SSPM", "SSUM", "SSTATE", "STABILIZE",
+ "STAOPT", "STAT", "*STATUS", "/STATUS", "STEF",
+ "/STITLE", "STORE", "SUBOPT", "SUBSET", "SUCALC",
+ "SUCR", "SUDEL", "SUEVAL", "SUGET", "SUMAP", "SUMTYPE",
+ "SUPL", "SUPR", "SURESU", "SUSAVE", "SUSEL", "SUVECT",
+ "SV", "SVPLOT", "SVTYP", "SWADD", "SWDEL", "SWGEN",
+ "SWLIST", "SYNCHRO", "/SYP", "/SYS", "TALLOW",
+ "TARGET", "*TAXIS", "TB", "TBCOPY", "TBDATA", "TBDELE",
+ "TBEO", "TBIN", "TBFIELD", "TBFT", "TBLE", "TBLIST",
+ "TBMODIF", "TBPLOT", "TBPT", "TBTEMP", "TCHG", "/TEE",
+ "TERM", "THEXPAND", "THOPT", "TIFF", "TIME",
+ "TIMERANGE", "TIMINT", "TIMP", "TINTP", "/TITLE",
+ "/TLABEL", "TOFFST", "*TOPER", "TORQ2D", "TORQC2D",
+ "TORQSUM", "TORUS", "TRANS", "TRANSFER", "*TREAD",
+ "TREF", "/TRIAD", "/TRLCY", "TRNOPT", "TRPDEL",
+ "TRPLIS", "TRPOIN", "TRTIME", "TSHAP", "/TSPEC",
+ "TSRES", "TUNIF", "TVAR", "/TXTRE", "/TYPE", "TYPE",
+ "/UCMD", "/UDOC", "/UI", "UIMP", "/UIS", "*ULIB",
+ "UNDELETE", "UNDO", "/UNITS", "UNPAUSE", "UPCOORD",
+ "UPGEOM", "*USE", "/USER", "USRCAL", "USRDOF",
+ "USRELEM", "V", "V2DOPT", "VA", "*VABS", "VADD",
+ "VARDEL", "VARNAM", "VATT", "VCLEAR", "*VCOL",
+ "/VCONE", "VCROSS", "*VCUM", "VDDAM", "VDELE", "VDGL",
+ "VDOT", "VDRAG", "*VEC", "*VEDIT", "VEORIENT", "VEXT",
+ "*VFACT", "*VFILL", "VFOPT", "VFQUERY", "VFSM",
+ "*VFUN", "VGEN", "*VGET", "VGET", "VGLUE", "/VIEW",
+ "VIMP", "VINP", "VINV", "*VITRP", "*VLEN", "VLIST",
+ "VLSCALE", "*VMASK", "VMESH", "VOFFST", "VOLUMES")
+
+ # list of in-built () functions
+ elafunf = ("NX()", "NY()", "NZ()", "KX()", "KY()", "KZ()", "LX()",
+ "LY()", "LZ()", "LSX()", "LSY()", "LSZ()", "NODE()",
+ "KP()", "DISTND()", "DISTKP()", "DISTEN()", "ANGLEN()",
+ "ANGLEK()", "NNEAR()", "KNEAR()", "ENEARN()",
+ "AREAND()", "AREAKP()", "ARNODE()", "NORMNX()",
+ "NORMNY()", "NORMNZ()", "NORMKX()", "NORMKY()",
+ "NORMKZ()", "ENEXTN()", "NELEM()", "NODEDOF()",
+ "ELADJ()", "NDFACE()", "NMFACE()", "ARFACE()", "UX()",
+ "UY()", "UZ()", "ROTX()", "ROTY()", "ROTZ()", "TEMP()",
+ "PRES()", "VX()", "VY()", "VZ()", "ENKE()", "ENDS()",
+ "VOLT()", "MAG()", "AX()", "AY()", "AZ()",
+ "VIRTINQR()", "KWGET()", "VALCHR()", "VALHEX()",
+ "CHRHEX()", "STRFILL()", "STRCOMP()", "STRPOS()",
+ "STRLENG()", "UPCASE()", "LWCASE()", "JOIN()",
+ "SPLIT()", "ABS()", "SIGN()", "CXABS()", "EXP()",
+ "LOG()", "LOG10()", "SQRT()", "NINT()", "MOD()",
+ "RAND()", "GDIS()", "SIN()", "COS()", "TAN()",
+ "SINH()", "COSH()", "TANH()", "ASIN()", "ACOS()",
+ "ATAN()", "ATAN2()")
+
+ elafung = ("NSEL()", "ESEL()", "KSEL()", "LSEL()", "ASEL()",
+ "VSEL()", "NDNEXT()", "ELNEXT()", "KPNEXT()",
+ "LSNEXT()", "ARNEXT()", "VLNEXT()", "CENTRX()",
+ "CENTRY()", "CENTRZ()")
+
+ elafunh = ("~CAT5IN", "~CATIAIN", "~PARAIN", "~PROEIN", "~SATIN",
+ "~UGIN", "A", "AADD", "AATT", "ABEXTRACT", "*ABBR",
+ "ABBRES", "ABBSAV", "ABS", "ACCAT", "ACCOPTION",
+ "ACEL", "ACLEAR", "ADAMS", "ADAPT", "ADD", "ADDAM",
+ "ADELE", "ADGL", "ADRAG", "AESIZE", "AFILLT", "AFLIST",
+ "AFSURF", "*AFUN", "AGEN", "AGLUE", "AINA", "AINP",
+ "AINV", "AL", "ALIST", "ALLSEL", "ALPHAD", "AMAP",
+ "AMESH", "/AN3D", "ANCNTR", "ANCUT", "ANCYC", "ANDATA",
+ "ANDSCL", "ANDYNA", "/ANFILE", "ANFLOW", "/ANGLE",
+ "ANHARM", "ANIM", "ANISOS", "ANMODE", "ANMRES",
+ "/ANNOT", "ANORM", "ANPRES", "ANSOL", "ANSTOAQWA",
+ "ANSTOASAS", "ANTIME", "ANTYPE")
+
+ tokens = {
+ 'root': [
+ (r'!.*\n', Comment),
+ include('strings'),
+ include('core'),
+ include('nums'),
+ (words((elafunb+elafunc+elafund+elafune+elafunh), suffix=r'\b'), Keyword),
+ (words((elafunf+elafung), suffix=r'\b'), Name.Builtin),
+ (r'AR[0-9]+', Name.Variable.Instance),
+ (r'[a-z][a-z0-9_]*', Name.Variable),
+ (r'[\s]+', Whitespace),
+ ],
+ 'core': [
+ # Operators
+ (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
+ (r'/EOF', Generic.Emph),
+ (r'[(),:&;]', Punctuation),
+ ],
+ 'strings': [
+ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
+ (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
+ (r'[$%]', String.Symbol),
+ ],
+ 'nums': [
+ (r'\d+(?![.ef])', Number.Integer),
+ (r'[+-]?\d*\.?\d+([ef][-+]?\d+)?', Number.Float),
+ (r'[+-]?\d+\.?\d*([ef][-+]?\d+)?', Number.Float),
+ ]
+ }
diff --git a/pygments/lexers/apl.py b/pygments/lexers/apl.py
new file mode 100644
index 0000000..57905b5
--- /dev/null
+++ b/pygments/lexers/apl.py
@@ -0,0 +1,104 @@
+"""
+ pygments.lexers.apl
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for APL.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['APLLexer']
+
+
class APLLexer(RegexLexer):
    """
    A simple APL lexer.

    Tokenizes APL source by character class: each APL glyph family
    (functions, operators, quad names, d-fn arguments, …) is mapped to a
    distinct token type so formatters can style them individually.

    .. versionadded:: 2.0
    """
    name = 'APL'
    url = 'https://en.m.wikipedia.org/wiki/APL_(programming_language)'
    aliases = ['apl']
    filenames = [
        '*.apl', '*.aplf', '*.aplo', '*.apln',
        '*.aplc', '*.apli', '*.dyalog',
    ]

    tokens = {
        'root': [
            # Whitespace
            # ==========
            (r'\s+', Whitespace),
            #
            # Comment
            # =======
            # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
            (r'[⍝#].*$', Comment.Single),
            #
            # Strings
            # =======
            # a doubled quote is the escape for a literal quote character
            (r'\'((\'\')|[^\'])*\'', String.Single),
            (r'"(("")|[^"])*"', String.Double),  # supported by NGN APL
            #
            # Punctuation
            # ===========
            # This token type is used for diamond and parenthesis
            # but not for bracket and ; (see below)
            (r'[⋄◇()]', Punctuation),
            #
            # Array indexing
            # ==============
            # Since this token type is very important in APL, it is not included in
            # the punctuation token type but rather in the following one
            (r'[\[\];]', String.Regex),
            #
            # Distinguished names
            # ===================
            # following IBM APL2 standard
            (r'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
            #
            # Labels
            # ======
            # following IBM APL2 standard
            # (r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
            #
            # Variables
            # =========
            # following IBM APL2 standard (with a leading _ ok for GNU APL and Dyalog)
            (r'[A-Za-zΔ∆⍙_][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
            #
            # Numbers
            # =======
            # covers hex, decimal with APL high-minus '¯', exponents, and the
            # optional J-suffixed imaginary part of a complex literal
            (r'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
             r'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
             Number),
            #
            # Operators
            # ==========
            (r'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘⌸&⌶@⌺⍥⍛⍢]', Name.Attribute),  # closest token type
            (r'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗⊆⊇⍸√⌾…⍮]',
             Operator),
            #
            # Constant
            # ========
            (r'⍬', Name.Constant),
            #
            # Quad symbol
            # ===========
            (r'[⎕⍞]', Name.Variable.Global),
            #
            # Arrows left/right
            # =================
            (r'[←→]', Keyword.Declaration),
            #
            # D-Fn
            # ====
            (r'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
            (r'[{}]', Keyword.Type),
        ],
    }
diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
new file mode 100644
index 0000000..1496d22
--- /dev/null
+++ b/pygments/lexers/archetype.py
@@ -0,0 +1,319 @@
+"""
+ pygments.lexers.archetype
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Archetype-related syntaxes, including:
+
+ - ODIN syntax <https://github.com/openEHR/odin>
+ - ADL syntax <http://www.openehr.org/releases/trunk/architecture/am/adl2.pdf>
+ - cADL sub-syntax of ADL
+
+ For uses of this syntax, see the openEHR archetypes <http://www.openEHR.org/ckm>
+
+ Contributed by Thomas Beale <https://github.com/wolandscat>,
+ <https://bitbucket.org/thomas_beale>.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, using, default
+from pygments.token import Text, Comment, Name, Literal, Number, String, \
+ Punctuation, Keyword, Operator, Generic, Whitespace
+
+__all__ = ['OdinLexer', 'CadlLexer', 'AdlLexer']
+
+
class AtomsLexer(RegexLexer):
    """
    Lexer for Values used in ADL and ODIN.

    This is a shared base class: it only defines pseudo-states (meant to be
    pulled in via ``include``) plus a few helper states; the concrete
    ``OdinLexer``/``CadlLexer``/``AdlLexer`` subclasses supply ``root``.

    .. versionadded:: 2.1
    """

    tokens = {
        # ----- pseudo-states for inclusion -----
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # '--' starts a to-end-of-line comment
            (r'([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
        ],
        'archetype_id': [
            # e.g. openEHR-EHR-OBSERVATION.lab_result.v1.0.0
            (r'([ \t]*)(([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}'
             r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)',
             bygroups(Whitespace, Name.Decorator)),
        ],
        'date_constraints': [
            # ISO 8601-based date/time constraints
            (r'[Xx?YyMmDdHhSs\d]{2,4}([:-][Xx?YyMmDdHhSs\d]{2}){2}', Literal.Date),
            # ISO 8601-based duration constraints + optional trailing slash
            (r'(P[YyMmWwDd]+(T[HhMmSs]+)?|PT[HhMmSs]+)/?', Literal.Date),
        ],
        'ordered_values': [
            # ISO 8601 date with optional 'T' ligature
            (r'\d{4}-\d{2}-\d{2}T?', Literal.Date),
            # ISO 8601 time
            (r'\d{2}:\d{2}:\d{2}(\.\d+)?([+-]\d{4}|Z)?', Literal.Date),
            # ISO 8601 duration
            (r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
             r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
            # numbers: exponent floats first so they win over plain ints
            (r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'[+-]?\d*\.\d+%?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[+-]?\d+%?', Number.Integer),
        ],
        'values': [
            include('ordered_values'),
            (r'([Tt]rue|[Ff]alse)', Literal),
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            # URI: a scheme followed by ':' enters the 'uri' state
            (r'[a-z][a-z0-9+.-]*:', Literal, 'uri'),
            # term code
            (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)(\w[\w-]*)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation, Name.Decorator,
                      Punctuation)),
            (r'\|', Punctuation, 'interval'),
            # list continuation
            (r'\.\.\.', Punctuation),
        ],
        'constraint_values': [
            (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)',
             bygroups(Punctuation, Name.Decorator, Punctuation), 'adl14_code_constraint'),
            # ADL 1.4 ordinal constraint
            (r'(\d*)(\|)(\[\w[\w-]*::\w[\w-]*\])((?:[,;])?)',
             bygroups(Number, Punctuation, Name.Decorator, Punctuation)),
            include('date_constraints'),
            include('values'),
        ],

        # ----- real states -----
        'string': [
            ('"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            # all other characters
            (r'[^\\"]+', String),
            # stray backslash
            (r'\\', String),
        ],
        'uri': [
            # effective URI terminators
            (r'[,>\s]', Punctuation, '#pop'),
            (r'[^>\s,]+', Literal),
        ],
        'interval': [
            (r'\|', Punctuation, '#pop'),
            include('ordered_values'),
            (r'\.\.', Punctuation),
            (r'[<>=] *', Punctuation),
            # handle +/-
            (r'\+/-', Punctuation),
            (r'\s+', Whitespace),
        ],
        'any_code': [
            include('archetype_id'),
            # if it is a code
            (r'[a-z_]\w*[0-9.]+(@[^\]]+)?', Name.Decorator),
            # if it is tuple with attribute names
            (r'[a-z_]\w*', Name.Class),
            # if it is an integer, i.e. Xpath child index
            (r'[0-9]+', Text),
            (r'\|', Punctuation, 'code_rubric'),
            (r'\]', Punctuation, '#pop'),
            # handle use_archetype statement
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
        ],
        'code_rubric': [
            (r'\|', Punctuation, '#pop'),
            (r'[^|]+', String),
        ],
        'adl14_code_constraint': [
            (r'\]', Punctuation, '#pop'),
            (r'\|', Punctuation, 'code_rubric'),
            (r'(\w[\w-]*)([;,]?)', bygroups(Name.Decorator, Punctuation)),
            include('whitespace'),
        ],
    }
+
+
class OdinLexer(AtomsLexer):
    """
    Lexer for ODIN syntax.

    ODIN is the object-data notation used inside ADL archetypes; most value
    tokenization is inherited from :class:`AtomsLexer` via ``include``.

    .. versionadded:: 2.1
    """
    name = 'ODIN'
    aliases = ['odin']
    filenames = ['*.odin']
    mimetypes = ['text/odin']

    tokens = {
        'path': [
            (r'>', Punctuation, '#pop'),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'/', Punctuation),
            (r'\[', Punctuation, 'key'),
            # a comma or whitespace terminates the path expression
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace), '#pop'),
            (r'\s+', Whitespace, '#pop'),
        ],
        'key': [
            include('values'),
            (r'\]', Punctuation, '#pop'),
        ],
        'type_cast': [
            (r'\)', Punctuation, '#pop'),
            # everything inside '(...)' is treated as a type name
            (r'[^)]+', Name.Class),
        ],
        'root': [
            include('whitespace'),
            (r'([Tt]rue|[Ff]alse)', Literal),
            include('values'),
            # x-ref path
            (r'/', Punctuation, 'path'),
            # x-ref path starting with key
            (r'\[', Punctuation, 'key'),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'=', Operator),
            (r'\(', Punctuation, 'type_cast'),
            (r',', Punctuation),
            (r'<', Punctuation),
            (r'>', Punctuation),
            (r';', Punctuation),
        ],
    }
+
+
class CadlLexer(AtomsLexer):
    """
    Lexer for cADL syntax.

    cADL is the constraint sub-language of ADL; value tokenization is
    inherited from :class:`AtomsLexer`.

    .. versionadded:: 2.1
    """
    name = 'cADL'
    aliases = ['cadl']
    filenames = ['*.cadl']

    tokens = {
        'path': [
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'/', Punctuation),
            (r'\[', Punctuation, 'any_code'),
            (r'\s+', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            (r'(cardinality|existence|occurrences|group|include|exclude|'
             r'allow_archetype|use_archetype|use_node)\W', Keyword.Type),
            (r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type),
            (r'(after|before|closed)\W', Keyword.Type),
            (r'(not)\W', Operator),
            (r'(matches|is_in)\W', Operator),
            # is_in / not is_in char
            ('(\u2208|\u2209)', Operator),
            # there_exists / not there_exists / for_all / and / or / xor / tilde
            # (fixed: the last alternative was mistyped as '\223C', which in a
            # non-raw string is the octal escape \x93 plus a literal 'C';
            # the intended glyph is U+223C TILDE OPERATOR)
            ('(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\u223C)',
             Operator),
            # regex in slot or as string constraint
            (r'(\{)(\s*)(/[^}]+/)(\s*)(\})',
             bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
            # regex in slot or as string constraint
            (r'(\{)(\s*)(\^[^}]+\^)(\s*)(\})',
             bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
            (r'/', Punctuation, 'path'),
            # for cardinality etc
            (r'(\{)((?:\d+\.\.)?(?:\d+|\*))'
             r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})',
             bygroups(Punctuation, Number, Number, Punctuation)),
            # [{ is start of a tuple value
            (r'\[\{', Punctuation),
            (r'\}\]', Punctuation),
            (r'\{', Punctuation),
            (r'\}', Punctuation),
            include('constraint_values'),
            # type name
            (r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'\[', Punctuation, 'any_code'),
            # NOTE(review): the trailing ']?' makes '>' optionally absorb a
            # following ']' — looks suspicious but is kept as-is to preserve
            # behavior; confirm against the ADL2 operator set.
            (r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator),
            (r'\(', Punctuation),
            (r'\)', Punctuation),
            # for lists of values
            (r',', Punctuation),
            (r'"', String, 'string'),
            # for assumed value
            (r';', Punctuation),
        ],
    }
+
+
class AdlLexer(AtomsLexer):
    """
    Lexer for ADL syntax.

    Recognizes the top-level section headings of an ADL archetype and
    delegates the body of each section to :class:`OdinLexer` or
    :class:`CadlLexer` via ``using``.

    .. versionadded:: 2.1
    """

    name = 'ADL'
    aliases = ['adl']
    filenames = ['*.adl', '*.adls', '*.adlf', '*.adlx']

    tokens = {
        'whitespace': [
            # blank line ends
            (r'\s*\n', Whitespace),
            # comment-only line
            (r'^([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
        ],
        'odin_section': [
            # repeating the following two rules from the root state enable multi-line
            # strings that start in the first column to be dealt with
            (r'^(language|description|ontology|terminology|annotations|'
             r'component_terminologies|revision_history)([ \t]*\n)',
             bygroups(Generic.Heading, Whitespace)),
            (r'^(definition)([ \t]*\n)', bygroups(Generic.Heading, Whitespace), 'cadl_section'),
            # an indented (or empty) line is ODIN content
            (r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)),
            # continuation of a multi-line string ending at column 0
            (r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)),
            # template overlay delimiter
            (r'^----------*\n', Text, '#pop'),
            (r'^.*\n', String),
            default('#pop'),
        ],
        'cadl_section': [
            (r'^([ \t]*|[ \t]+.*)\n', using(CadlLexer)),
            default('#pop'),
        ],
        'rules_section': [
            (r'^[ \t]+.*\n', using(CadlLexer)),
            default('#pop'),
        ],
        'metadata': [
            (r'\)', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'([Tt]rue|[Ff]alse)', Literal),
            # numbers and version ids
            (r'\d+(\.\d+)*', Literal),
            # Guids
            (r'(\d|[a-fA-F])+(-(\d|[a-fA-F])+){3,}', Literal),
            (r'\w+', Name.Class),
            (r'"', String, 'string'),
            (r'=', Operator),
            (r'[ \t]+', Whitespace),
            default('#pop'),
        ],
        'root': [
            (r'^(archetype|template_overlay|operational_template|template|'
             r'speciali[sz]e)', Generic.Heading),
            (r'^(language|description|ontology|terminology|annotations|'
             r'component_terminologies|revision_history)[ \t]*\n',
             Generic.Heading, 'odin_section'),
            (r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
            (r'^(rules)[ \t]*\n', Generic.Heading, 'rules_section'),
            include('archetype_id'),
            # '(...)' after the archetype id holds metadata key/value pairs
            (r'([ \t]*)(\()', bygroups(Whitespace, Punctuation), 'metadata'),
            include('whitespace'),
        ],
    }
diff --git a/pygments/lexers/arrow.py b/pygments/lexers/arrow.py
new file mode 100644
index 0000000..bdfdc74
--- /dev/null
+++ b/pygments/lexers/arrow.py
@@ -0,0 +1,117 @@
+"""
+ pygments.lexers.arrow
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Arrow.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, include
+from pygments.token import Text, Operator, Keyword, Punctuation, Name, \
+ String, Number, Whitespace
+
+__all__ = ['ArrowLexer']
+
# Shared regex fragments for the ArrowLexer rules below.
TYPES = r'\b(int|bool|char)((?:\[\])*)(?=\s+)'  # base type + optional array suffix (2 groups)
IDENT = r'([a-zA-Z_][a-zA-Z0-9_]*)'  # identifier (1 group)
DECL = TYPES + r'(\s+)' + IDENT  # typed declaration: type, brackets, space, name (4 groups)
+
+
class ArrowLexer(RegexLexer):
    """
    Lexer for Arrow

    .. versionadded:: 2.7
    """

    name = 'Arrow'
    url = 'https://pypi.org/project/py-arrow-lang/'
    aliases = ['arrow']
    filenames = ['*.arw']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # leading '|' characters draw the arrow-style block structure
            (r'^[|\s]+', Punctuation),
            include('blocks'),
            include('statements'),
            include('expressions'),
        ],
        'blocks': [
            # function header: 'function' + arrow + return-type decl + '('
            (r'(function)(\n+)(/-->)(\s*)' +
             DECL +  # 4 groups
             r'(\()', bygroups(
                 Keyword.Reserved, Whitespace, Punctuation,
                 Whitespace, Keyword.Type, Punctuation, Whitespace,
                 Name.Function, Punctuation
             ), 'fparams'),
            # block open/close arrows and loop-back marker
            (r'/-->$|\\-->$|/--<|\\--<|\^', Punctuation),
        ],
        'statements': [
            # variable declaration; the whitespace group is tokenized as
            # Whitespace for consistency with the identical rule in
            # 'fparams' (was Text).
            (DECL, bygroups(Keyword.Type, Punctuation, Whitespace, Name.Variable)),
            (r'\[', Punctuation, 'index'),
            (r'=', Operator),
            (r'require|main', Keyword.Reserved),
            (r'print', Keyword.Reserved, 'print'),
        ],
        'expressions': [
            (r'\s+', Whitespace),
            (r'[0-9]+', Number.Integer),
            (r'true|false', Keyword.Constant),
            (r"'", String.Char, 'char'),
            (r'"', String.Double, 'string'),
            (r'\{', Punctuation, 'array'),
            (r'==|!=|<|>|\+|-|\*|/|%', Operator),
            (r'and|or|not|length', Operator.Word),
            (r'(input)(\s+)(int|char\[\])', bygroups(
                Keyword.Reserved, Whitespace, Keyword.Type
            )),
            # function call: identifier immediately followed by '('
            (IDENT + r'(\()', bygroups(
                Name.Function, Punctuation
            ), 'fargs'),
            (IDENT, Name.Variable),
            (r'\[', Punctuation, 'index'),
            (r'\(', Punctuation, 'expressions'),
            (r'\)', Punctuation, '#pop'),
        ],
        'print': [
            include('expressions'),
            (r',', Punctuation),
            default('#pop'),
        ],
        'fparams': [
            (DECL, bygroups(Keyword.Type, Punctuation, Whitespace, Name.Variable)),
            (r',', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],
        'escape': [
            (r'\\(["\\/abfnrtv]|[0-9]{1,3}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4})',
             String.Escape),
        ],
        'char': [
            (r"'", String.Char, '#pop'),
            include('escape'),
            (r"[^'\\]", String.Char),
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            include('escape'),
            (r'[^"\\]+', String.Double),
        ],
        'array': [
            include('expressions'),
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation),
        ],
        'fargs': [
            include('expressions'),
            (r'\)', Punctuation, '#pop'),
        ],
        'index': [
            include('expressions'),
            (r'\]', Punctuation, '#pop'),
        ],
    }
diff --git a/pygments/lexers/arturo.py b/pygments/lexers/arturo.py
new file mode 100644
index 0000000..406bcc6
--- /dev/null
+++ b/pygments/lexers/arturo.py
@@ -0,0 +1,250 @@
+"""
+ pygments.lexers.arturo
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Arturo language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, do_insertions, include, \
+ this, using, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text
+
+from pygments.util import ClassNotFound, get_bool_opt
+
+__all__ = ['ArturoLexer']
+
+
class ArturoLexer(RegexLexer):
    """
    For Arturo source code.

    See `Arturo's Github <https://github.com/arturo-lang/arturo>`_
    and `Arturo's Website <https://arturo-lang.io/>`_.

    .. versionadded:: 2.14.0
    """

    name = 'Arturo'
    aliases = ['arturo', 'art']
    filenames = ['*.art']
    url = 'https://arturo-lang.io/'

    def __init__(self, **options):
        # `handle_annotateds` (default True): sub-lex annotated strings
        # such as ``{!python ...}`` with the named language's lexer.
        self.handle_annotateds = get_bool_opt(options, 'handle_annotateds',
                                              True)
        RegexLexer.__init__(self, **options)

    def handle_annotated_strings(self, match):
        """Adds syntax from another languages inside annotated strings

        match args:
        1:open_string,
        2:exclamation_mark,
        3:lang_name,
        4:space_or_newline,
        5:code,
        6:close_string
        """
        from pygments.lexers import get_lexer_by_name

        # Header's section
        yield match.start(1), String.Double, match.group(1)
        yield match.start(2), String.Interpol, match.group(2)
        yield match.start(3), String.Interpol, match.group(3)
        yield match.start(4), Text.Whitespace, match.group(4)

        lexer = None
        if self.handle_annotateds:
            try:
                lexer = get_lexer_by_name(match.group(3).strip())
            except ClassNotFound:
                pass
        code = match.group(5)

        if lexer is None:
            # Fixed: token tuples are (index, token, value); the first
            # element must be the integer offset, not the matched text
            # (was ``match.group(5)``).
            yield match.start(5), String, code
        else:
            yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(6), String.Double, match.group(6)

    tokens = {
        'root': [
            (r';.*?$', Comment.Single),
            (r'^((\s#!)|(#!)).*?$', Comment.Hashbang),

            # Constants
            (words(('false', 'true', 'maybe'),  # boolean
                   suffix=r'\b'), Name.Constant),
            (words(('this', 'init'),  # class related keywords
                   prefix=r'\b', suffix=r'\b\??:?'), Name.Builtin.Pseudo),
            (r'`.`', String.Char),  # character
            (r'\\\w+\b\??:?', Name.Property),  # array index
            (r'#\w+', Name.Constant),  # color
            (r'\b[0-9]+\.[0-9]+', Number.Float),  # float
            (r'\b[0-9]+', Number.Integer),  # integer
            (r'\w+\b\??:', Name.Label),  # label
            # Note: Literals can be labeled too
            (r'\'(?:\w+\b\??:?)', Keyword.Declaration),  # literal
            (r'\:\w+', Keyword.Type),  # type
            # Note: Attributes can be labeled too
            (r'\.\w+\??:?', Name.Attribute),  # attributes

            # Switch structure
            (r'(\()(.*?)(\)\?)',
             bygroups(Punctuation, using(this), Punctuation)),

            # Single Line Strings
            (r'"', String.Double, 'inside-simple-string'),
            (r'»', String.Single, 'inside-smart-string'),
            (r'«««', String.Double, 'inside-safe-string'),
            (r'\{\/', String.Single, 'inside-regex-string'),

            # Multi Line Strings
            (r'\{\:', String.Double, 'inside-curly-verb-string'),
            (r'(\{)(\!)(\w+)(\s|\n)([\w\W]*?)(^\})', handle_annotated_strings),
            (r'\{', String.Single, 'inside-curly-string'),
            (r'\-{3,}', String.Single, 'inside-eof-string'),

            include('builtin-functions'),

            # Operators
            (r'[()[\],]', Punctuation),
            (words(('->', '==>', '|', '::', '@', '#',  # sugar syntax
                    '$', '&', '!', '!!', './')), Name.Decorator),
            (words(('<:', ':>', ':<', '>:', '<\\', '<>', '<', '>',
                    'ø', '∞',
                    '+', '-', '*', '~', '=', '^', '%', '/', '//',
                    '==>', '<=>', '<==>',
                    '=>>', '<<=>>', '<<==>>',
                    '-->', '<->', '<-->',
                    '=|', '|=', '-:', ':-',
                    '_', '.', '..', '\\')), Operator),

            (r'\b\w+', Name),
            (r'\s+', Text.Whitespace),
            (r'.+$', Error),
        ],

        'inside-interpol': [
            (r'\|', String.Interpol, '#pop'),
            (r'[^|]+', using(this)),
        ],
        'inside-template': [
            (r'\|\|\>', String.Interpol, '#pop'),
            (r'[^|]+', using(this)),
        ],
        'string-escape': [
            (words(('\\\\', '\\n', '\\t', '\\"')), String.Escape),
        ],

        'inside-simple-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'"', String.Double, '#pop'),  # Closing Quote
            (r'[^|"]+', String)  # String Content
        ],
        'inside-smart-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\n', String.Single, '#pop'),  # Closing Quote
            (r'[^|\n]+', String)  # String Content
        ],
        'inside-safe-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'»»»', String.Double, '#pop'),  # Closing Quote
            (r'[^|»]+', String)  # String Content
        ],
        'inside-regex-string': [
            (r'\\[sSwWdDbBZApPxucItnvfr0]+', String.Escape),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\/\}', String.Single, '#pop'),  # Closing Quote
            (r'[^|\/]+', String.Regex),  # String Content
        ],
        'inside-curly-verb-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\:\}', String.Double, '#pop'),  # Closing Quote
            (r'[^|<:]+', String),  # String Content
        ],
        'inside-curly-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\}', String.Single, '#pop'),  # Closing Quote
            (r'[^|<}]+', String),  # String Content
        ],
        'inside-eof-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\Z', String.Single, '#pop'),  # Closing Quote
            (r'[^|<]+', String),  # String Content
        ],

        'builtin-functions': [
            # predicate builtins (take a trailing '?')
            # Fixed: missing comma between 'block' and 'char' (implicit
            # concatenation produced the bogus word 'blockchar'); typos
            # 'floatin' -> 'floating' and 'ymbol' -> 'symbol'.
            (words((
                'all', 'and', 'any', 'ascii', 'attr', 'attribute',
                'attributeLabel', 'binary', 'block', 'char', 'contains',
                'database', 'date', 'dictionary', 'empty', 'equal', 'even',
                'every', 'exists', 'false', 'floating', 'function', 'greater',
                'greaterOrEqual', 'if', 'in', 'inline', 'integer', 'is',
                'key', 'label', 'leap', 'less', 'lessOrEqual', 'literal',
                'logical', 'lower', 'nand', 'negative', 'nor', 'not',
                'notEqual', 'null', 'numeric', 'odd', 'or', 'path',
                'pathLabel', 'positive', 'prefix', 'prime', 'set', 'some',
                'sorted', 'standalone', 'string', 'subset', 'suffix',
                'superset', 'symbol', 'true', 'try', 'type', 'unless', 'upper',
                'when', 'whitespace', 'word', 'xnor', 'xor', 'zero',
            ), prefix=r'\b', suffix=r'\b\?'), Name.Builtin),
            # regular builtins
            (words((
                'abs', 'acos', 'acosh', 'acsec', 'acsech', 'actan', 'actanh',
                'add', 'after', 'alphabet', 'and', 'angle', 'append', 'arg',
                'args', 'arity', 'array', 'as', 'asec', 'asech', 'asin',
                'asinh', 'atan', 'atan2', 'atanh', 'attr', 'attrs', 'average',
                'before', 'benchmark', 'blend', 'break', 'builtins1',
                'builtins2', 'call', 'capitalize', 'case', 'ceil', 'chop',
                'chunk', 'clear', 'close', 'cluster', 'color', 'combine',
                'conj', 'continue', 'copy', 'cos', 'cosh', 'couple', 'csec',
                'csech', 'ctan', 'ctanh', 'cursor', 'darken', 'dec', 'decode',
                'decouple', 'define', 'delete', 'desaturate', 'deviation',
                'dictionary', 'difference', 'digest', 'digits', 'div', 'do',
                'download', 'drop', 'dup', 'e', 'else', 'empty', 'encode',
                'ensure', 'env', 'epsilon', 'escape', 'execute', 'exit', 'exp',
                'extend', 'extract', 'factors', 'false', 'fdiv', 'filter',
                'first', 'flatten', 'floor', 'fold', 'from', 'function',
                'gamma', 'gcd', 'get', 'goto', 'hash', 'help', 'hypot', 'if',
                'in', 'inc', 'indent', 'index', 'infinity', 'info', 'input',
                'insert', 'inspect', 'intersection', 'invert', 'join', 'keys',
                'kurtosis', 'last', 'let', 'levenshtein', 'lighten', 'list',
                'ln', 'log', 'loop', 'lower', 'mail', 'map', 'match', 'max',
                'maybe', 'median', 'min', 'mod', 'module', 'mul', 'nand',
                'neg', 'new', 'nor', 'normalize', 'not', 'now', 'null', 'open',
                'or', 'outdent', 'pad', 'panic', 'path', 'pause',
                'permissions', 'permutate', 'pi', 'pop', 'pow', 'powerset',
                'powmod', 'prefix', 'print', 'prints', 'process', 'product',
                'query', 'random', 'range', 'read', 'relative', 'remove',
                'rename', 'render', 'repeat', 'replace', 'request', 'return',
                'reverse', 'round', 'sample', 'saturate', 'script', 'sec',
                'sech', 'select', 'serve', 'set', 'shl', 'shr', 'shuffle',
                'sin', 'sinh', 'size', 'skewness', 'slice', 'sort', 'split',
                'sqrt', 'squeeze', 'stack', 'strip', 'sub', 'suffix', 'sum',
                'switch', 'symbols', 'symlink', 'sys', 'take', 'tan', 'tanh',
                'terminal', 'to', 'true', 'truncate', 'try', 'type', 'union',
                'unique', 'unless', 'until', 'unzip', 'upper', 'values', 'var',
                'variance', 'volume', 'webview', 'while', 'with', 'wordwrap',
                'write', 'xnor', 'xor', 'zip'
            ), prefix=r'\b', suffix=r'\b'), Name.Builtin)
        ],

    }
diff --git a/pygments/lexers/asc.py b/pygments/lexers/asc.py
new file mode 100644
index 0000000..4154467
--- /dev/null
+++ b/pygments/lexers/asc.py
@@ -0,0 +1,55 @@
+"""
+ pygments.lexers.asc
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for various ASCII armored files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Generic, Name, Operator, String, Whitespace
+
+__all__ = ['AscLexer']
+
+
class AscLexer(RegexLexer):
    """
    Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped
    base64 data.

    .. versionadded:: 2.10
    """
    name = 'ASCII armored'
    aliases = ['asc', 'pem']
    filenames = [
        '*.asc',  # PGP; *.gpg, *.pgp, and *.sig too, but those can be binary
        '*.pem',  # X.509; *.cer, *.crt, *.csr, and key etc too, but those can be binary
        'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk',
        'id_rsa',  # SSH private keys
    ]
    mimetypes = ['application/pgp-keys', 'application/pgp-encrypted',
                 'application/pgp-signature']

    # MULTILINE so the '^...$' BEGIN/END anchors match per line
    flags = re.MULTILINE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'^-----BEGIN [^\n]+-----$', Generic.Heading, 'data'),
            # anything outside a BEGIN/END envelope is treated as commentary
            (r'\S+', Comment),
        ],
        'data': [
            (r'\s+', Whitespace),
            # armor header line, e.g. 'Version: GnuPG v2'
            (r'^([^:]+)(:)([ \t]+)(.*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
            (r'^-----END [^\n]+-----$', Generic.Heading, 'root'),
            # the base64 payload itself
            (r'\S+', String),
        ],
    }

    def analyse_text(text):
        # Claim the file outright if it starts with a BEGIN armor line;
        # implicitly returns None (no score) otherwise.
        if re.search(r'^-----BEGIN [^\n]+-----\r?\n', text):
            return True
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
new file mode 100644
index 0000000..734f986
--- /dev/null
+++ b/pygments/lexers/asm.py
@@ -0,0 +1,1037 @@
+"""
+ pygments.lexers.asm
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for assembly languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, words, \
+ DelegatingLexer, default
+from pygments.lexers.c_cpp import CppLexer, CLexer
+from pygments.lexers.d import DLexer
+from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
+ Other, Keyword, Operator, Whitespace
+
+__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
+ 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'LlvmMirBodyLexer',
+ 'LlvmMirLexer', 'NasmLexer', 'NasmObjdumpLexer', 'TasmLexer',
+ 'Ca65Lexer', 'Dasm16Lexer']
+
+
+class GasLexer(RegexLexer):
+ """
+ For Gas (AT&T) assembly code.
+ """
+ name = 'GAS'
+ aliases = ['gas', 'asm']
+ filenames = ['*.s', '*.S']
+ mimetypes = ['text/x-gas']
+
+ #: optional Comment or Whitespace
+ string = r'"(\\"|[^"])*"'
+ char = r'[\w$.@-]'
+ identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
+ number = r'(?:0[xX][a-fA-F0-9]+|#?-?\d+)'
+ register = '%' + identifier + r'\b'
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ (identifier + ':', Name.Label),
+ (r'\.' + identifier, Name.Attribute, 'directive-args'),
+ (r'lock|rep(n?z)?|data\d+', Name.Attribute),
+ (identifier, Name.Function, 'instruction-args'),
+ (r'[\r\n]+', Text)
+ ],
+ 'directive-args': [
+ (identifier, Name.Constant),
+ (string, String),
+ ('@' + identifier, Name.Attribute),
+ (number, Number.Integer),
+ (register, Name.Variable),
+ (r'[\r\n]+', Whitespace, '#pop'),
+ (r'([;#]|//).*?\n', Comment.Single, '#pop'),
+ (r'/[*].*?[*]/', Comment.Multiline),
+ (r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
+
+ include('punctuation'),
+ include('whitespace')
+ ],
+ 'instruction-args': [
+ # For objdump-disassembled code, shouldn't occur in
+ # actual assembler input
+ ('([a-z0-9]+)( )(<)('+identifier+')(>)',
+ bygroups(Number.Hex, Text, Punctuation, Name.Constant,
+ Punctuation)),
+ ('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
+ bygroups(Number.Hex, Text, Punctuation, Name.Constant,
+ Punctuation, Number.Integer, Punctuation)),
+
+ # Address constants
+ (identifier, Name.Constant),
+ (number, Number.Integer),
+ # Registers
+ (register, Name.Variable),
+ # Numeric constants
+ ('$'+number, Number.Integer),
+ (r"$'(.|\\')'", String.Char),
+ (r'[\r\n]+', Whitespace, '#pop'),
+ (r'([;#]|//).*?\n', Comment.Single, '#pop'),
+ (r'/[*].*?[*]/', Comment.Multiline),
+ (r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
+
+ include('punctuation'),
+ include('whitespace')
+ ],
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'([;#]|//).*?\n', Comment.Single),
+ (r'/[*][\w\W]*?[*]/', Comment.Multiline)
+ ],
+ 'punctuation': [
+ (r'[-*,.()\[\]!:{}]+', Punctuation)
+ ]
+ }
+
+ def analyse_text(text):
+ if re.search(r'^\.(text|data|section)', text, re.M):
+ return True
+ elif re.search(r'^\.\w+', text, re.M):
+ return 0.1
+
+
+def _objdump_lexer_tokens(asm_lexer):
+ """
+ Common objdump lexer tokens to wrap an ASM lexer.
+ """
+ hex_re = r'[0-9A-Za-z]'
+ return {
+ 'root': [
+ # File name & format:
+ ('(.*?)(:)( +file format )(.*?)$',
+ bygroups(Name.Label, Punctuation, Text, String)),
+ # Section header
+ ('(Disassembly of section )(.*?)(:)$',
+ bygroups(Text, Name.Label, Punctuation)),
+ # Function labels
+ # (With offset)
+ ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
+ bygroups(Number.Hex, Whitespace, Punctuation, Name.Function,
+ Punctuation, Number.Hex, Punctuation)),
+ # (Without offset)
+ ('('+hex_re+'+)( )(<)(.*?)(>:)$',
+ bygroups(Number.Hex, Whitespace, Punctuation, Name.Function,
+ Punctuation)),
+ # Code line with disassembled instructions
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
+ bygroups(Whitespace, Name.Label, Whitespace, Number.Hex, Whitespace,
+ using(asm_lexer))),
+ # Code line without raw instructions (objdump --no-show-raw-insn)
+ ('( *)('+hex_re+r'+:)( *\t)([a-zA-Z].*?)$',
+ bygroups(Whitespace, Name.Label, Whitespace,
+ using(asm_lexer))),
+ # Code line with ascii
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
+ bygroups(Whitespace, Name.Label, Whitespace, Number.Hex, Whitespace, String)),
+ # Continued code line, only raw opcodes without disassembled
+ # instruction
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
+ bygroups(Whitespace, Name.Label, Whitespace, Number.Hex)),
+ # Skipped a few bytes
+ (r'\t\.\.\.$', Text),
+ # Relocation line
+ # (With offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
+ bygroups(Whitespace, Name.Label, Whitespace, Name.Property, Whitespace,
+ Name.Constant, Punctuation, Number.Hex)),
+ # (Without offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
+ bygroups(Whitespace, Name.Label, Whitespace, Name.Property, Whitespace,
+ Name.Constant)),
+ (r'[^\n]+\n', Other)
+ ]
+ }
+
+
+class ObjdumpLexer(RegexLexer):
+ """
+ For the output of ``objdump -dr``.
+ """
+ name = 'objdump'
+ aliases = ['objdump']
+ filenames = ['*.objdump']
+ mimetypes = ['text/x-objdump']
+
+ tokens = _objdump_lexer_tokens(GasLexer)
+
+
+class DObjdumpLexer(DelegatingLexer):
+ """
+ For the output of ``objdump -Sr`` on compiled D files.
+ """
+ name = 'd-objdump'
+ aliases = ['d-objdump']
+ filenames = ['*.d-objdump']
+ mimetypes = ['text/x-d-objdump']
+
+ def __init__(self, **options):
+ super().__init__(DLexer, ObjdumpLexer, **options)
+
+
+class CppObjdumpLexer(DelegatingLexer):
+ """
+ For the output of ``objdump -Sr`` on compiled C++ files.
+ """
+ name = 'cpp-objdump'
+ aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
+ filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
+ mimetypes = ['text/x-cpp-objdump']
+
+ def __init__(self, **options):
+ super().__init__(CppLexer, ObjdumpLexer, **options)
+
+
+class CObjdumpLexer(DelegatingLexer):
+ """
+ For the output of ``objdump -Sr`` on compiled C files.
+ """
+ name = 'c-objdump'
+ aliases = ['c-objdump']
+ filenames = ['*.c-objdump']
+ mimetypes = ['text/x-c-objdump']
+
+ def __init__(self, **options):
+ super().__init__(CLexer, ObjdumpLexer, **options)
+
+
+class HsailLexer(RegexLexer):
+ """
+ For HSAIL assembly code.
+
+ .. versionadded:: 2.2
+ """
+ name = 'HSAIL'
+ aliases = ['hsail', 'hsa']
+ filenames = ['*.hsail']
+ mimetypes = ['text/x-hsail']
+
+ string = r'"[^"]*?"'
+ identifier = r'[a-zA-Z_][\w.]*'
+ # Registers
+ register_number = r'[0-9]+'
+ register = r'(\$(c|s|d|q)' + register_number + r')\b'
+ # Qualifiers
+ alignQual = r'(align\(\d+\))'
+ widthQual = r'(width\((\d+|all)\))'
+ allocQual = r'(alloc\(agent\))'
+ # Instruction Modifiers
+ roundingMod = (r'((_ftz)?(_up|_down|_zero|_near))')
+ datatypeMod = (r'_('
+ # packedTypes
+ r'u8x4|s8x4|u16x2|s16x2|u8x8|s8x8|u16x4|s16x4|u32x2|s32x2|'
+ r'u8x16|s8x16|u16x8|s16x8|u32x4|s32x4|u64x2|s64x2|'
+ r'f16x2|f16x4|f16x8|f32x2|f32x4|f64x2|'
+ # baseTypes
+ r'u8|s8|u16|s16|u32|s32|u64|s64|'
+ r'b128|b8|b16|b32|b64|b1|'
+ r'f16|f32|f64|'
+ # opaqueType
+ r'roimg|woimg|rwimg|samp|sig32|sig64)')
+
+ # Numeric Constant
+ float = r'((\d+\.)|(\d*\.\d+))[eE][+-]?\d+'
+ hexfloat = r'0[xX](([0-9a-fA-F]+\.[0-9a-fA-F]*)|([0-9a-fA-F]*\.[0-9a-fA-F]+))[pP][+-]?\d+'
+ ieeefloat = r'0((h|H)[0-9a-fA-F]{4}|(f|F)[0-9a-fA-F]{8}|(d|D)[0-9a-fA-F]{16})'
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+
+ (string, String),
+
+ (r'@' + identifier + ':?', Name.Label),
+
+ (register, Name.Variable.Anonymous),
+
+ include('keyword'),
+
+ (r'&' + identifier, Name.Variable.Global),
+ (r'%' + identifier, Name.Variable),
+
+ (hexfloat, Number.Hex),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (ieeefloat, Number.Float),
+ (float, Number.Float),
+ (r'\d+', Number.Integer),
+
+ (r'[=<>{}\[\]()*.,:;!]|x\b', Punctuation)
+ ],
+ 'whitespace': [
+ (r'(\n|\s)+', Whitespace),
+ ],
+ 'comments': [
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'//.*?\n', Comment.Single),
+ ],
+ 'keyword': [
+ # Types
+ (r'kernarg' + datatypeMod, Keyword.Type),
+
+ # Regular keywords
+ (r'\$(full|base|small|large|default|zero|near)', Keyword),
+ (words((
+ 'module', 'extension', 'pragma', 'prog', 'indirect', 'signature',
+ 'decl', 'kernel', 'function', 'enablebreakexceptions',
+ 'enabledetectexceptions', 'maxdynamicgroupsize', 'maxflatgridsize',
+ 'maxflatworkgroupsize', 'requireddim', 'requiredgridsize',
+ 'requiredworkgroupsize', 'requirenopartialworkgroups'),
+ suffix=r'\b'), Keyword),
+
+ # instructions
+ (roundingMod, Keyword),
+ (datatypeMod, Keyword),
+ (r'_(' + alignQual + '|' + widthQual + ')', Keyword),
+ (r'_kernarg', Keyword),
+ (r'(nop|imagefence)\b', Keyword),
+ (words((
+ 'cleardetectexcept', 'clock', 'cuid', 'debugtrap', 'dim',
+ 'getdetectexcept', 'groupbaseptr', 'kernargbaseptr', 'laneid',
+ 'maxcuid', 'maxwaveid', 'packetid', 'setdetectexcept', 'waveid',
+ 'workitemflatabsid', 'workitemflatid', 'nullptr', 'abs', 'bitrev',
+ 'currentworkgroupsize', 'currentworkitemflatid', 'fract', 'ncos',
+ 'neg', 'nexp2', 'nlog2', 'nrcp', 'nrsqrt', 'nsin', 'nsqrt',
+ 'gridgroups', 'gridsize', 'not', 'sqrt', 'workgroupid',
+ 'workgroupsize', 'workitemabsid', 'workitemid', 'ceil', 'floor',
+ 'rint', 'trunc', 'add', 'bitmask', 'borrow', 'carry', 'copysign',
+ 'div', 'rem', 'sub', 'shl', 'shr', 'and', 'or', 'xor', 'unpackhi',
+ 'unpacklo', 'max', 'min', 'fma', 'mad', 'bitextract', 'bitselect',
+ 'shuffle', 'cmov', 'bitalign', 'bytealign', 'lerp', 'nfma', 'mul',
+ 'mulhi', 'mul24hi', 'mul24', 'mad24', 'mad24hi', 'bitinsert',
+ 'combine', 'expand', 'lda', 'mov', 'pack', 'unpack', 'packcvt',
+ 'unpackcvt', 'sad', 'sementp', 'ftos', 'stof', 'cmp', 'ld', 'st',
+ '_eq', '_ne', '_lt', '_le', '_gt', '_ge', '_equ', '_neu', '_ltu',
+ '_leu', '_gtu', '_geu', '_num', '_nan', '_seq', '_sne', '_slt',
+ '_sle', '_sgt', '_sge', '_snum', '_snan', '_sequ', '_sneu', '_sltu',
+ '_sleu', '_sgtu', '_sgeu', 'atomic', '_ld', '_st', '_cas', '_add',
+ '_and', '_exch', '_max', '_min', '_or', '_sub', '_wrapdec',
+ '_wrapinc', '_xor', 'ret', 'cvt', '_readonly', '_kernarg', '_global',
+ 'br', 'cbr', 'sbr', '_scacq', '_screl', '_scar', '_rlx', '_wave',
+ '_wg', '_agent', '_system', 'ldimage', 'stimage', '_v2', '_v3', '_v4',
+ '_1d', '_2d', '_3d', '_1da', '_2da', '_1db', '_2ddepth', '_2dadepth',
+ '_width', '_height', '_depth', '_array', '_channelorder',
+ '_channeltype', 'querysampler', '_coord', '_filter', '_addressing',
+ 'barrier', 'wavebarrier', 'initfbar', 'joinfbar', 'waitfbar',
+ 'arrivefbar', 'leavefbar', 'releasefbar', 'ldf', 'activelaneid',
+ 'activelanecount', 'activelanemask', 'activelanepermute', 'call',
+ 'scall', 'icall', 'alloca', 'packetcompletionsig',
+ 'addqueuewriteindex', 'casqueuewriteindex', 'ldqueuereadindex',
+ 'stqueuereadindex', 'readonly', 'global', 'private', 'group',
+ 'spill', 'arg', '_upi', '_downi', '_zeroi', '_neari', '_upi_sat',
+ '_downi_sat', '_zeroi_sat', '_neari_sat', '_supi', '_sdowni',
+ '_szeroi', '_sneari', '_supi_sat', '_sdowni_sat', '_szeroi_sat',
+ '_sneari_sat', '_pp', '_ps', '_sp', '_ss', '_s', '_p', '_pp_sat',
+ '_ps_sat', '_sp_sat', '_ss_sat', '_s_sat', '_p_sat')), Keyword),
+
+ # Integer types
+ (r'i[1-9]\d*', Keyword)
+ ]
+ }
+
+
+class LlvmLexer(RegexLexer):
+ """
+ For LLVM assembly code.
+ """
+ name = 'LLVM'
+ url = 'https://llvm.org/docs/LangRef.html'
+ aliases = ['llvm']
+ filenames = ['*.ll']
+ mimetypes = ['text/x-llvm']
+
+ #: optional Comment or Whitespace
+ string = r'"[^"]*?"'
+ identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')'
+ block_label = r'(' + identifier + r'|(\d+))'
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+
+ # Matched before keywords, since keywords are also valid label names.
+ (block_label + r'\s*:', Name.Label),
+
+ include('keyword'),
+
+ (r'%' + identifier, Name.Variable),
+ (r'@' + identifier, Name.Variable.Global),
+ (r'%\d+', Name.Variable.Anonymous),
+ (r'@\d+', Name.Variable.Global),
+ (r'#\d+', Name.Variable.Global),
+ (r'!' + identifier, Name.Variable),
+ (r'!\d+', Name.Variable.Anonymous),
+ (r'c?' + string, String),
+
+ (r'0[xX][a-fA-F0-9]+', Number),
+ (r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
+
+ (r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
+ ],
+ 'whitespace': [
+ (r'(\n|\s+)+', Whitespace),
+ (r';.*?\n', Comment)
+ ],
+ 'keyword': [
+ # Regular keywords
+ (words((
+ 'aarch64_sve_vector_pcs', 'aarch64_vector_pcs', 'acq_rel',
+ 'acquire', 'add', 'addrspace', 'addrspacecast', 'afn', 'alias',
+ 'aliasee', 'align', 'alignLog2', 'alignstack', 'alloca',
+ 'allocsize', 'allOnes', 'alwaysinline', 'alwaysInline',
+ 'amdgpu_cs', 'amdgpu_es', 'amdgpu_gfx', 'amdgpu_gs',
+ 'amdgpu_hs', 'amdgpu_kernel', 'amdgpu_ls', 'amdgpu_ps',
+ 'amdgpu_vs', 'and', 'any', 'anyregcc', 'appending', 'arcp',
+ 'argmemonly', 'args', 'arm_aapcs_vfpcc', 'arm_aapcscc',
+ 'arm_apcscc', 'ashr', 'asm', 'atomic', 'atomicrmw',
+ 'attributes', 'available_externally', 'avr_intrcc',
+ 'avr_signalcc', 'bit', 'bitcast', 'bitMask', 'blockaddress',
+ 'blockcount', 'br', 'branchFunnel', 'builtin', 'byArg',
+ 'byref', 'byte', 'byteArray', 'byval', 'c', 'call', 'callbr',
+ 'callee', 'caller', 'calls', 'canAutoHide', 'catch',
+ 'catchpad', 'catchret', 'catchswitch', 'cc', 'ccc',
+ 'cfguard_checkcc', 'cleanup', 'cleanuppad', 'cleanupret',
+ 'cmpxchg', 'cold', 'coldcc', 'comdat', 'common', 'constant',
+ 'contract', 'convergent', 'critical', 'cxx_fast_tlscc',
+ 'datalayout', 'declare', 'default', 'define', 'deplibs',
+ 'dereferenceable', 'dereferenceable_or_null', 'distinct',
+ 'dllexport', 'dllimport', 'dso_local', 'dso_local_equivalent',
+ 'dso_preemptable', 'dsoLocal', 'eq', 'exact', 'exactmatch',
+ 'extern_weak', 'external', 'externally_initialized',
+ 'extractelement', 'extractvalue', 'fadd', 'false', 'fast',
+ 'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'fmul',
+ 'fneg', 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'freeze',
+ 'frem', 'from', 'fsub', 'funcFlags', 'function', 'gc',
+ 'getelementptr', 'ghccc', 'global', 'guid', 'gv', 'hash',
+ 'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
+ 'ifunc', 'inaccessiblemem_or_argmemonly',
+ 'inaccessiblememonly', 'inalloca', 'inbounds', 'indir',
+ 'indirectbr', 'info', 'initialexec', 'inline', 'inlineBits',
+ 'inlinehint', 'inrange', 'inreg', 'insertelement',
+ 'insertvalue', 'insts', 'intel_ocl_bicc', 'inteldialect',
+ 'internal', 'inttoptr', 'invoke', 'jumptable', 'kind',
+ 'landingpad', 'largest', 'linkage', 'linkonce', 'linkonce_odr',
+ 'live', 'load', 'local_unnamed_addr', 'localdynamic',
+ 'localexec', 'lshr', 'max', 'metadata', 'min', 'minsize',
+ 'module', 'monotonic', 'msp430_intrcc', 'mul', 'mustprogress',
+ 'musttail', 'naked', 'name', 'nand', 'ne', 'nest', 'ninf',
+ 'nnan', 'noalias', 'nobuiltin', 'nocallback', 'nocapture',
+ 'nocf_check', 'noduplicate', 'noduplicates', 'nofree',
+ 'noimplicitfloat', 'noinline', 'noInline', 'nomerge', 'none',
+ 'nonlazybind', 'nonnull', 'noprofile', 'norecurse',
+ 'noRecurse', 'noredzone', 'noreturn', 'nosync', 'notail',
+ 'notEligibleToImport', 'noundef', 'nounwind', 'nsw',
+ 'nsz', 'null', 'null_pointer_is_valid', 'nuw', 'oeq', 'offset',
+ 'oge', 'ogt', 'ole', 'olt', 'one', 'opaque', 'optforfuzzing',
+ 'optnone', 'optsize', 'or', 'ord', 'param', 'params',
+ 'partition', 'path', 'personality', 'phi', 'poison',
+ 'preallocated', 'prefix', 'preserve_allcc', 'preserve_mostcc',
+ 'private', 'prologue', 'protected', 'ptrtoint', 'ptx_device',
+ 'ptx_kernel', 'readnone', 'readNone', 'readonly', 'readOnly',
+ 'reassoc', 'refs', 'relbf', 'release', 'resByArg', 'resume',
+ 'ret', 'returnDoesNotAlias', 'returned', 'returns_twice',
+ 'safestack', 'samesize', 'sanitize_address',
+ 'sanitize_hwaddress', 'sanitize_memory', 'sanitize_memtag',
+ 'sanitize_thread', 'sdiv', 'section', 'select', 'seq_cst',
+ 'sext', 'sge', 'sgt', 'shadowcallstack', 'shl',
+ 'shufflevector', 'sideeffect', 'signext', 'single',
+ 'singleImpl', 'singleImplName', 'sitofp', 'sizeM1',
+ 'sizeM1BitWidth', 'sle', 'slt', 'source_filename',
+ 'speculatable', 'speculative_load_hardening', 'spir_func',
+ 'spir_kernel', 'srem', 'sret', 'ssp', 'sspreq', 'sspstrong',
+ 'store', 'strictfp', 'sub', 'summaries', 'summary', 'swiftcc',
+ 'swifterror', 'swiftself', 'switch', 'syncscope', 'tail',
+ 'tailcc', 'target', 'thread_local', 'to', 'token', 'triple',
+ 'true', 'trunc', 'type', 'typeCheckedLoadConstVCalls',
+ 'typeCheckedLoadVCalls', 'typeid', 'typeidCompatibleVTable',
+ 'typeIdInfo', 'typeTestAssumeConstVCalls',
+ 'typeTestAssumeVCalls', 'typeTestRes', 'typeTests', 'udiv',
+ 'ueq', 'uge', 'ugt', 'uitofp', 'ule', 'ult', 'umax', 'umin',
+ 'undef', 'une', 'uniformRetVal', 'uniqueRetVal', 'unknown',
+ 'unnamed_addr', 'uno', 'unordered', 'unreachable', 'unsat',
+ 'unwind', 'urem', 'uselistorder', 'uselistorder_bb', 'uwtable',
+ 'va_arg', 'varFlags', 'variable', 'vcall_visibility',
+ 'vFuncId', 'virtFunc', 'virtualConstProp', 'void', 'volatile',
+ 'vscale', 'vTableFuncs', 'weak', 'weak_odr', 'webkit_jscc',
+ 'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly',
+ 'x', 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_intrcc',
+ 'x86_mmx', 'x86_regcallcc', 'x86_stdcallcc', 'x86_thiscallcc',
+ 'x86_vectorcallcc', 'xchg', 'xor', 'zeroext',
+ 'zeroinitializer', 'zext', 'immarg', 'willreturn'),
+ suffix=r'\b'), Keyword),
+
+ # Types
+ (words(('void', 'half', 'bfloat', 'float', 'double', 'fp128',
+ 'x86_fp80', 'ppc_fp128', 'label', 'metadata', 'x86_mmx',
+ 'x86_amx', 'token', 'ptr')),
+ Keyword.Type),
+
+ # Integer types
+ (r'i[1-9]\d*', Keyword.Type)
+ ]
+ }
+
+
+class LlvmMirBodyLexer(RegexLexer):
+ """
+ For LLVM MIR examples without the YAML wrapper.
+
+ .. versionadded:: 2.6
+ """
+ name = 'LLVM-MIR Body'
+ url = 'https://llvm.org/docs/MIRLangRef.html'
+ aliases = ['llvm-mir-body']
+ filenames = []
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ # Attributes on basic blocks
+ (words(('liveins', 'successors'), suffix=':'), Keyword),
+ # Basic Block Labels
+ (r'bb\.[0-9]+(\.[a-zA-Z0-9_.-]+)?( \(address-taken\))?:', Name.Label),
+ (r'bb\.[0-9]+ \(%[a-zA-Z0-9_.-]+\)( \(address-taken\))?:', Name.Label),
+ (r'%bb\.[0-9]+(\.\w+)?', Name.Label),
+ # Stack references
+ (r'%stack\.[0-9]+(\.\w+\.addr)?', Name),
+ # Subreg indices
+ (r'%subreg\.\w+', Name),
+ # Virtual registers
+ (r'%[a-zA-Z0-9_]+ *', Name.Variable, 'vreg'),
+ # Reference to LLVM-IR global
+ include('global'),
+ # Reference to Intrinsic
+ (r'intrinsic\(\@[a-zA-Z0-9_.]+\)', Name.Variable.Global),
+ # Comparison predicates
+ (words(('eq', 'ne', 'sgt', 'sge', 'slt', 'sle', 'ugt', 'uge', 'ult',
+ 'ule'), prefix=r'intpred\(', suffix=r'\)'), Name.Builtin),
+ (words(('oeq', 'one', 'ogt', 'oge', 'olt', 'ole', 'ugt', 'uge',
+ 'ult', 'ule'), prefix=r'floatpred\(', suffix=r'\)'),
+ Name.Builtin),
+ # Physical registers
+ (r'\$\w+', String.Single),
+ # Assignment operator
+ (r'=', Operator),
+ # gMIR Opcodes
+ (r'(G_ANYEXT|G_[SZ]EXT|G_SEXT_INREG|G_TRUNC|G_IMPLICIT_DEF|G_PHI|'
+ r'G_FRAME_INDEX|G_GLOBAL_VALUE|G_INTTOPTR|G_PTRTOINT|G_BITCAST|'
+ r'G_CONSTANT|G_FCONSTANT|G_VASTART|G_VAARG|G_CTLZ|G_CTLZ_ZERO_UNDEF|'
+ r'G_CTTZ|G_CTTZ_ZERO_UNDEF|G_CTPOP|G_BSWAP|G_BITREVERSE|'
+ r'G_ADDRSPACE_CAST|G_BLOCK_ADDR|G_JUMP_TABLE|G_DYN_STACKALLOC|'
+ r'G_ADD|G_SUB|G_MUL|G_[SU]DIV|G_[SU]REM|G_AND|G_OR|G_XOR|G_SHL|'
+ r'G_[LA]SHR|G_[IF]CMP|G_SELECT|G_GEP|G_PTR_MASK|G_SMIN|G_SMAX|'
+ r'G_UMIN|G_UMAX|G_[US]ADDO|G_[US]ADDE|G_[US]SUBO|G_[US]SUBE|'
+ r'G_[US]MULO|G_[US]MULH|G_FNEG|G_FPEXT|G_FPTRUNC|G_FPTO[US]I|'
+ r'G_[US]ITOFP|G_FABS|G_FCOPYSIGN|G_FCANONICALIZE|G_FMINNUM|'
+ r'G_FMAXNUM|G_FMINNUM_IEEE|G_FMAXNUM_IEEE|G_FMINIMUM|G_FMAXIMUM|'
+ r'G_FADD|G_FSUB|G_FMUL|G_FMA|G_FMAD|G_FDIV|G_FREM|G_FPOW|G_FEXP|'
+ r'G_FEXP2|G_FLOG|G_FLOG2|G_FLOG10|G_FCEIL|G_FCOS|G_FSIN|G_FSQRT|'
+ r'G_FFLOOR|G_FRINT|G_FNEARBYINT|G_INTRINSIC_TRUNC|'
+ r'G_INTRINSIC_ROUND|G_LOAD|G_[ZS]EXTLOAD|G_INDEXED_LOAD|'
+ r'G_INDEXED_[ZS]EXTLOAD|G_STORE|G_INDEXED_STORE|'
+ r'G_ATOMIC_CMPXCHG_WITH_SUCCESS|G_ATOMIC_CMPXCHG|'
+ r'G_ATOMICRMW_(XCHG|ADD|SUB|AND|NAND|OR|XOR|MAX|MIN|UMAX|UMIN|FADD|'
+ r'FSUB)'
+ r'|G_FENCE|G_EXTRACT|G_UNMERGE_VALUES|G_INSERT|G_MERGE_VALUES|'
+ r'G_BUILD_VECTOR|G_BUILD_VECTOR_TRUNC|G_CONCAT_VECTORS|'
+ r'G_INTRINSIC|G_INTRINSIC_W_SIDE_EFFECTS|G_BR|G_BRCOND|'
+ r'G_BRINDIRECT|G_BRJT|G_INSERT_VECTOR_ELT|G_EXTRACT_VECTOR_ELT|'
+ r'G_SHUFFLE_VECTOR)\b',
+ Name.Builtin),
+ # Target independent opcodes
+ (r'(COPY|PHI|INSERT_SUBREG|EXTRACT_SUBREG|REG_SEQUENCE)\b',
+ Name.Builtin),
+ # Flags
+ (words(('killed', 'implicit')), Keyword),
+ # ConstantInt values
+ (r'(i[0-9]+)( +)', bygroups(Keyword.Type, Whitespace), 'constantint'),
+ # ConstantFloat values
+ (r'(half|float|double) +', Keyword.Type, 'constantfloat'),
+ # Bare immediates
+ include('integer'),
+ # MMO's
+ (r'(::)( *)', bygroups(Operator, Whitespace), 'mmo'),
+ # MIR Comments
+ (r';.*', Comment),
+ # If we get here, assume it's a target instruction
+ (r'[a-zA-Z0-9_]+', Name),
+ # Everything else that isn't highlighted
+ (r'[(), \n]+', Text),
+ ],
+ # The integer constant from a ConstantInt value
+ 'constantint': [
+ include('integer'),
+ (r'(?=.)', Text, '#pop'),
+ ],
+ # The floating point constant from a ConstantFloat value
+ 'constantfloat': [
+ include('float'),
+ (r'(?=.)', Text, '#pop'),
+ ],
+ 'vreg': [
+ # The bank or class if there is one
+ (r'( *)(:(?!:))', bygroups(Whitespace, Keyword), ('#pop', 'vreg_bank_or_class')),
+ # The LLT if there is one
+ (r'( *)(\()', bygroups(Whitespace, Text), 'vreg_type'),
+ (r'(?=.)', Text, '#pop'),
+ ],
+ 'vreg_bank_or_class': [
+ # The unassigned bank/class
+ (r'( *)(_)', bygroups(Whitespace, Name.Variable.Magic)),
+ (r'( *)([a-zA-Z0-9_]+)', bygroups(Whitespace, Name.Variable)),
+ # The LLT if there is one
+ (r'( *)(\()', bygroups(Whitespace, Text), 'vreg_type'),
+ (r'(?=.)', Text, '#pop'),
+ ],
+ 'vreg_type': [
+ # Scalar and pointer types
+ (r'( *)([sp][0-9]+)', bygroups(Whitespace, Keyword.Type)),
+ (r'( *)(<[0-9]+ *x *[sp][0-9]+>)', bygroups(Whitespace, Keyword.Type)),
+ (r'\)', Text, '#pop'),
+ (r'(?=.)', Text, '#pop'),
+ ],
+ 'mmo': [
+ (r'\(', Text),
+ (r' +', Whitespace),
+ (words(('load', 'store', 'on', 'into', 'from', 'align', 'monotonic',
+ 'acquire', 'release', 'acq_rel', 'seq_cst')),
+ Keyword),
+ # IR references
+ (r'%ir\.[a-zA-Z0-9_.-]+', Name),
+ (r'%ir-block\.[a-zA-Z0-9_.-]+', Name),
+ (r'[-+]', Operator),
+ include('integer'),
+ include('global'),
+ (r',', Punctuation),
+ (r'\), \(', Text),
+ (r'\)', Text, '#pop'),
+ ],
+ 'integer': [(r'-?[0-9]+', Number.Integer),],
+ 'float': [(r'-?[0-9]+\.[0-9]+(e[+-][0-9]+)?', Number.Float)],
+ 'global': [(r'\@[a-zA-Z0-9_.]+', Name.Variable.Global)],
+ }
+
+
+class LlvmMirLexer(RegexLexer):
+ """
+ Lexer for the overall LLVM MIR document format.
+
+ MIR is a human readable serialization format that's used to represent LLVM's
+ machine specific intermediate representation. It allows LLVM's developers to
+ see the state of the compilation process at various points, as well as test
+ individual pieces of the compiler.
+
+ .. versionadded:: 2.6
+ """
+ name = 'LLVM-MIR'
+ url = 'https://llvm.org/docs/MIRLangRef.html'
+ aliases = ['llvm-mir']
+ filenames = ['*.mir']
+
+ tokens = {
+ 'root': [
+ # Comments are hashes at the YAML level
+ (r'#.*', Comment),
+ # Documents starting with | are LLVM-IR
+ (r'--- \|$', Keyword, 'llvm_ir'),
+ # Other documents are MIR
+ (r'---', Keyword, 'llvm_mir'),
+ # Consume everything else in one token for efficiency
+ (r'[^-#]+|.', Text),
+ ],
+ 'llvm_ir': [
+ # Documents end with '...' or '---'
+ (r'(\.\.\.|(?=---))', Keyword, '#pop'),
+ # Delegate to the LlvmLexer
+ (r'((?:.|\n)+?)(?=(\.\.\.|---))', bygroups(using(LlvmLexer))),
+ ],
+ 'llvm_mir': [
+ # Comments are hashes at the YAML level
+ (r'#.*', Comment),
+ # Documents end with '...' or '---'
+ (r'(\.\.\.|(?=---))', Keyword, '#pop'),
+ # Handle the simple attributes
+ (r'name:', Keyword, 'name'),
+ (words(('alignment', ),
+ suffix=':'), Keyword, 'number'),
+ (words(('legalized', 'regBankSelected', 'tracksRegLiveness',
+ 'selected', 'exposesReturnsTwice'),
+ suffix=':'), Keyword, 'boolean'),
+ # Handle the attributes whose contents we don't highlight
+ (words(('registers', 'stack', 'fixedStack', 'liveins', 'frameInfo',
+ 'machineFunctionInfo'),
+ suffix=':'), Keyword),
+ # Delegate the body block to the LlvmMirBodyLexer
+ (r'body: *\|', Keyword, 'llvm_mir_body'),
+ # Consume everything else
+ (r'.+', Text),
+ (r'\n', Whitespace),
+ ],
+ 'name': [
+ (r'[^\n]+', Name),
+ default('#pop'),
+ ],
+ 'boolean': [
+ (r' *(true|false)', Name.Builtin),
+ default('#pop'),
+ ],
+ 'number': [
+ (r' *[0-9]+', Number),
+ default('#pop'),
+ ],
+ 'llvm_mir_body': [
+ # Documents end with '...' or '---'.
+ # We have to pop llvm_mir_body and llvm_mir
+ (r'(\.\.\.|(?=---))', Keyword, '#pop:2'),
+ # Delegate the body block to the LlvmMirBodyLexer
+ (r'((?:.|\n)+?)(?=\.\.\.|---)', bygroups(using(LlvmMirBodyLexer))),
+ # The '...' is optional. If we didn't already find it then it isn't
+ # there. There might be a '---' instead though.
+ (r'(?!\.\.\.|---)((?:.|\n)+)', bygroups(using(LlvmMirBodyLexer))),
+ ],
+ }
+
+
+class NasmLexer(RegexLexer):
+ """
+ For Nasm (Intel) assembly code.
+ """
+ name = 'NASM'
+ aliases = ['nasm']
+ filenames = ['*.asm', '*.ASM', '*.nasm']
+ mimetypes = ['text/x-nasm']
+
+ # Tasm uses the same file endings, but TASM is not as common as NASM, so
+ # we prioritize NASM higher by default
+ priority = 1.0
+
+ identifier = r'[a-z$._?][\w$.?#@~]*'
+ hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
+ octn = r'[0-7]+q'
+ binn = r'[01]+b'
+ decn = r'[0-9]+'
+ floatn = decn + r'\.e?' + decn
+ string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
+ declkw = r'(?:res|d)[bwdqt]|times'
+ register = (r'(r[0-9][0-5]?[bwd]?|'
+ r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
+ r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]|k[0-7]|'
+ r'[xyz]mm(?:[12][0-9]?|3[01]?|[04-9]))\b')
+ wordop = r'seg|wrt|strict|rel|abs'
+ type = r'byte|[dq]?word'
+ # Directives must be followed by whitespace; otherwise the CPU
+ # directive would also match the start of e.g. the cpuid instruction.
+ directives = (r'(?:BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
+ r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
+ r'EXPORT|LIBRARY|MODULE)(?=\s)')
+
+ flags = re.IGNORECASE | re.MULTILINE
+ tokens = {
+ 'root': [
+ (r'^\s*%', Comment.Preproc, 'preproc'),
+ include('whitespace'),
+ (identifier + ':', Name.Label),
+ (r'(%s)(\s+)(equ)' % identifier,
+ bygroups(Name.Constant, Whitespace, Keyword.Declaration),
+ 'instruction-args'),
+ (directives, Keyword, 'instruction-args'),
+ (declkw, Keyword.Declaration, 'instruction-args'),
+ (identifier, Name.Function, 'instruction-args'),
+ (r'[\r\n]+', Whitespace)
+ ],
+ 'instruction-args': [
+ (string, String),
+ (hexn, Number.Hex),
+ (octn, Number.Oct),
+ (binn, Number.Bin),
+ (floatn, Number.Float),
+ (decn, Number.Integer),
+ include('punctuation'),
+ (register, Name.Builtin),
+ (identifier, Name.Variable),
+ (r'[\r\n]+', Whitespace, '#pop'),
+ include('whitespace')
+ ],
+ 'preproc': [
+ (r'[^;\n]+', Comment.Preproc),
+ (r';.*?\n', Comment.Single, '#pop'),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'[ \t]+', Whitespace),
+ (r';.*', Comment.Single),
+ (r'#.*', Comment.Single)
+ ],
+ 'punctuation': [
+ (r'[,{}():\[\]]+', Punctuation),
+ (r'[&|^<>+*/%~-]+', Operator),
+ (r'[$]+', Keyword.Constant),
+ (wordop, Operator.Word),
+ (type, Keyword.Type)
+ ],
+ }
+
+ def analyse_text(text):
+ # Probably TASM
+ if re.match(r'PROC', text, re.IGNORECASE):
+ return False
+
+
+class NasmObjdumpLexer(ObjdumpLexer):
+ """
+ For the output of ``objdump -d -M intel``.
+
+ .. versionadded:: 2.0
+ """
+ name = 'objdump-nasm'
+ aliases = ['objdump-nasm']
+ filenames = ['*.objdump-intel']
+ mimetypes = ['text/x-nasm-objdump']
+
+ tokens = _objdump_lexer_tokens(NasmLexer)
+
+
+class TasmLexer(RegexLexer):
+ """
+ For Tasm (Turbo Assembler) assembly code.
+ """
+ name = 'TASM'
+ aliases = ['tasm']
+ filenames = ['*.asm', '*.ASM', '*.tasm']
+ mimetypes = ['text/x-tasm']
+
+ identifier = r'[@a-z$._?][\w$.?#@~]*'
+ hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
+ octn = r'[0-7]+q'
+ binn = r'[01]+b'
+ decn = r'[0-9]+'
+ floatn = decn + r'\.e?' + decn
+ string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
+ declkw = r'(?:res|d)[bwdqt]|times'
+ register = (r'(r[0-9][0-5]?[bwd]|'
+ r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
+ r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7])\b')
+ wordop = r'seg|wrt|strict'
+ type = r'byte|[dq]?word'
+ directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
+ r'ORG|ALIGN|STRUC|ENDSTRUC|ENDS|COMMON|CPU|GROUP|UPPERCASE|INCLUDE|'
+ r'EXPORT|LIBRARY|MODULE|PROC|ENDP|USES|ARG|DATASEG|UDATASEG|END|IDEAL|'
+ r'P386|MODEL|ASSUME|CODESEG|SIZE')
+ # T[A-Z][a-z] is more of a convention. Lexer should filter out STRUC definitions
+ # and then 'add' them to datatype somehow.
+ datatype = (r'db|dd|dw|T[A-Z][a-z]+')
+
+ flags = re.IGNORECASE | re.MULTILINE
+ tokens = {
+ 'root': [
+ (r'^\s*%', Comment.Preproc, 'preproc'),
+ include('whitespace'),
+ (identifier + ':', Name.Label),
+ (directives, Keyword, 'instruction-args'),
+ (r'(%s)(\s+)(%s)' % (identifier, datatype),
+ bygroups(Name.Constant, Whitespace, Keyword.Declaration),
+ 'instruction-args'),
+ (declkw, Keyword.Declaration, 'instruction-args'),
+ (identifier, Name.Function, 'instruction-args'),
+ (r'[\r\n]+', Whitespace)
+ ],
+ 'instruction-args': [
+ (string, String),
+ (hexn, Number.Hex),
+ (octn, Number.Oct),
+ (binn, Number.Bin),
+ (floatn, Number.Float),
+ (decn, Number.Integer),
+ include('punctuation'),
+ (register, Name.Builtin),
+ (identifier, Name.Variable),
+ # Do not match newline when it's preceded by a backslash
+ (r'(\\)(\s*)(;.*)([\r\n])',
+ bygroups(Text, Whitespace, Comment.Single, Whitespace)),
+ (r'[\r\n]+', Whitespace, '#pop'),
+ include('whitespace')
+ ],
+ 'preproc': [
+ (r'[^;\n]+', Comment.Preproc),
+ (r';.*?\n', Comment.Single, '#pop'),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'whitespace': [
+ (r'[\n\r]', Whitespace),
+ (r'(\\)([\n\r])', bygroups(Text, Whitespace)),
+ (r'[ \t]+', Whitespace),
+ (r';.*', Comment.Single)
+ ],
+ 'punctuation': [
+ (r'[,():\[\]]+', Punctuation),
+ (r'[&|^<>+*=/%~-]+', Operator),
+ (r'[$]+', Keyword.Constant),
+ (wordop, Operator.Word),
+ (type, Keyword.Type)
+ ],
+ }
+
+ def analyse_text(text):
+ # See above
+ if re.match(r'PROC', text, re.I):
+ return True
+
+
+class Ca65Lexer(RegexLexer):
+ """
+ For ca65 assembler sources.
+
+ .. versionadded:: 1.6
+ """
+ name = 'ca65 assembler'
+ aliases = ['ca65']
+ filenames = ['*.s']
+
+ flags = re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r';.*', Comment.Single),
+ (r'\s+', Whitespace),
+ (r'[a-z_.@$][\w.@$]*:', Name.Label),
+ (r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]'
+ r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs'
+ r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor'
+ r'|bit)\b', Keyword),
+ (r'\.\w+', Keyword.Pseudo),
+ (r'[-+~*/^&|!<>=]', Operator),
+ (r'"[^"\n]*.', String),
+ (r"'[^'\n]*.", String.Char),
+ (r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex),
+ (r'\d+', Number.Integer),
+ (r'%[01]+', Number.Bin),
+ (r'[#,.:()=\[\]]', Punctuation),
+ (r'[a-z_.@$][\w.@$]*', Name),
+ ]
+ }
+
+ def analyse_text(self, text):
+ # ca65 comments start with ";" (GAS uses "#"), so a leading ";" suggests ca65
+ if re.search(r'^\s*;', text, re.MULTILINE):
+ return 0.9
+
+
+class Dasm16Lexer(RegexLexer):
+ """
+ For DCPU-16 Assembly.
+
+ .. versionadded:: 2.4
+ """
+ name = 'DASM16'
+ url = 'http://0x10c.com/doc/dcpu-16.txt'
+ aliases = ['dasm16']
+ filenames = ['*.dasm16', '*.dasm']
+ mimetypes = ['text/x-dasm16']
+
+ INSTRUCTIONS = [
+ 'SET',
+ 'ADD', 'SUB',
+ 'MUL', 'MLI',
+ 'DIV', 'DVI',
+ 'MOD', 'MDI',
+ 'AND', 'BOR', 'XOR',
+ 'SHR', 'ASR', 'SHL',
+ 'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU',
+ 'ADX', 'SBX',
+ 'STI', 'STD',
+ 'JSR',
+ 'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI',
+ ]
+
+ REGISTERS = [
+ 'A', 'B', 'C',
+ 'X', 'Y', 'Z',
+ 'I', 'J',
+ 'SP', 'PC', 'EX',
+ 'POP', 'PEEK', 'PUSH'
+ ]
+
+ # Regular expressions used to build the token rules below
+ char = r'[a-zA-Z0-9_$@.]'
+ identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
+ number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)'
+ binary_number = r'0b[01_]+'
+ instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')'
+ single_char = r"'\\?" + char + "'"
+ string = r'"(\\"|[^"])*"'
+
+ def guess_identifier(lexer, match):
+ ident = match.group(0)
+ klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label
+ yield match.start(), klass, ident
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ (':' + identifier, Name.Label),
+ (identifier + ':', Name.Label),
+ (instruction, Name.Function, 'instruction-args'),
+ (r'\.' + identifier, Name.Function, 'data-args'),
+ (r'[\r\n]+', Whitespace)
+ ],
+
+ 'numeric' : [
+ (binary_number, Number.Integer),
+ (number, Number.Integer),
+ (single_char, String),
+ ],
+
+ 'arg' : [
+ (identifier, guess_identifier),
+ include('numeric')
+ ],
+
+ 'deref' : [
+ (r'\+', Punctuation),
+ (r'\]', Punctuation, '#pop'),
+ include('arg'),
+ include('whitespace')
+ ],
+
+ 'instruction-line' : [
+ (r'[\r\n]+', Whitespace, '#pop'),
+ (r';.*?$', Comment, '#pop'),
+ include('whitespace')
+ ],
+
+ 'instruction-args': [
+ (r',', Punctuation),
+ (r'\[', Punctuation, 'deref'),
+ include('arg'),
+ include('instruction-line')
+ ],
+
+ 'data-args' : [
+ (r',', Punctuation),
+ include('numeric'),
+ (string, String),
+ include('instruction-line')
+ ],
+
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r';.*?\n', Comment)
+ ],
+ }
diff --git a/pygments/lexers/automation.py b/pygments/lexers/automation.py
new file mode 100644
index 0000000..8730226
--- /dev/null
+++ b/pygments/lexers/automation.py
@@ -0,0 +1,381 @@
+"""
+ pygments.lexers.automation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for automation scripting languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, combined
+from pygments.token import Text, Comment, Operator, Name, String, \
+ Number, Punctuation, Generic
+
+__all__ = ['AutohotkeyLexer', 'AutoItLexer']
+
+
class AutohotkeyLexer(RegexLexer):
    """
    For autohotkey source code.

    .. versionadded:: 1.4
    """
    name = 'autohotkey'
    url = 'http://www.autohotkey.com/'
    aliases = ['autohotkey', 'ahk']
    filenames = ['*.ahk', '*.ahkl']
    mimetypes = ['text/x-autohotkey']

    # State machine: 'root' dispatches to sub-states for block comments,
    # continuation sections and double-quoted strings; the include()d states
    # are lookup tables for commands, built-ins, labels and numbers.
    # Rule order matters -- earlier rules win.
    tokens = {
        'root': [
            # /* ... */ block comments must start a line.
            (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
            # A '(' starting a line opens a continuation section.
            (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
            (r'\s+;.*?$', Comment.Single),
            (r'^;.*?$', Comment.Single),
            (r'[]{}(),;[]', Punctuation),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # %var% dereference syntax.
            (r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
            (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
            include('commands'),
            include('labels'),
            include('builtInFunctions'),
            include('builtInVariables'),
            (r'"', String, combined('stringescape', 'dqs')),
            include('numbers'),
            (r'[a-zA-Z_#@$][\w#@$]*', Name),
            (r'\\|\'', Text),
            # Backtick escape sequences outside strings.
            (r'\`([,%`abfnrtv\-+;])', String.Escape),
            include('garbage'),
        ],
        'incomment': [
            # Closing */ must also start a line (AutoHotkey rule).
            (r'^\s*\*/', Comment.Multiline, '#pop'),
            (r'[^*]+', Comment.Multiline),
            (r'\*', Comment.Multiline)
        ],
        'incontinuation': [
            # Section ends at a ')' starting a line.
            (r'^\s*\)', Generic, '#pop'),
            (r'[^)]', Generic),
            (r'[)]', Generic),
        ],
        'commands': [
            # Command names are only recognized at the start of a line.
            (r'(?i)^(\s*)(global|local|static|'
             r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
             r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
             r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
             r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
             r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
             r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
             r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
             r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
             r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
             r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
             r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
             r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
             r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
             r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
             r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
             r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
             r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
             r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
             r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
             r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
             r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
             r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
             r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
             r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
             r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
             r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
             r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
             r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
             r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
             r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
             r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
             r'SetBatchLines|SetCapslockState|SetControlDelay|'
             r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
             r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
             r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
             r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
             r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
             r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
             r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
             r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
             r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
             r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
             r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
             r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
             r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
             r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
             r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
             r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
             r'WinWait)\b', bygroups(Text, Name.Builtin)),
        ],
        'builtInFunctions': [
            (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
             r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
             r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
             r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
             r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
             r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
             r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
             r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
             r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
             r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
             r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
             r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
             r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
             r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
             r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
             r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
             Name.Function),
        ],
        'builtInVariables': [
            (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
             r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
             r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
             r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
             r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
             r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
             r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
             r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
             r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
             r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
             r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
             r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
             r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
             r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
             r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
             r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
             r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
             r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
             r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
             r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
             r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
             r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
             r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
             r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
             r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
             r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
             r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
             r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
             r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
             r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
             Name.Variable),
        ],
        'labels': [
            # hotkeys and labels
            # technically, hotkey names are limited to named keys and buttons
            (r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
            (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'stringescape': [
            # "" is an escaped quote inside a double-quoted string.
            (r'\"\"|\`([,%`abfnrtv])', String.Escape),
        ],
        'strings': [
            (r'[^"\n]+', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings')
        ],
        'garbage': [
            # Swallow stray non-newline whitespace so 'root' never stalls.
            (r'[^\S\n]', Text),
            # (r'.', Text), # no cheating
        ],
    }
+
+
class AutoItLexer(RegexLexer):
    """
    For AutoIt files.

    AutoIt is a freeware BASIC-like scripting language
    designed for automating the Windows GUI and general scripting

    .. versionadded:: 1.6
    """
    name = 'AutoIt'
    url = 'http://www.autoitscript.com/site/autoit/'
    aliases = ['autoit']
    filenames = ['*.au3']
    mimetypes = ['text/x-autoit']

    # Keywords, functions, macros from au3.keywords.properties
    # which can be found in AutoIt installed directory, e.g.
    # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties

    keywords = """\
    #include-once #include #endregion #forcedef #forceref #region
    and byref case continueloop dim do else elseif endfunc endif
    endselect exit exitloop for func global
    if local next not or return select step
    then to until wend while exit""".split()

    functions = """\
    abs acos adlibregister adlibunregister asc ascw asin assign atan
    autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
    binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
    blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
    consolewrite consolewriteerror controlclick controlcommand controldisable
    controlenable controlfocus controlgetfocus controlgethandle controlgetpos
    controlgettext controlhide controllistview controlmove controlsend
    controlsettext controlshow controltreeview cos dec dircopy dircreate
    dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
    dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
    dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
    drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
    drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
    drivespacetotal drivestatus envget envset envupdate eval execute exp
    filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
    filedelete fileexists filefindfirstfile filefindnextfile fileflush
    filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
    filegetshortname filegetsize filegettime filegetversion fileinstall filemove
    fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
    filesavedialog fileselectfolder filesetattrib filesetpos filesettime
    filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
    guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
    guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
    guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
    guictrlcreateinput guictrlcreatelabel guictrlcreatelist
    guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
    guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
    guictrlcreatepic guictrlcreateprogress guictrlcreateradio
    guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
    guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
    guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
    guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
    guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
    guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
    guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
    guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
    guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
    guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
    guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
    httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
    inetread inidelete iniread inireadsection inireadsectionnames
    inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
    isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
    isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
    mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
    number objcreate objcreateinterface objevent objevent objget objname
    onautoitexitregister onautoitexitunregister opt ping pixelchecksum
    pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
    processgetstats processlist processsetpriority processwait processwaitclose
    progressoff progresson progressset ptr random regdelete regenumkey
    regenumval regread regwrite round run runas runaswait runwait send
    sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
    sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
    sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
    string stringaddcr stringcompare stringformat stringfromasciiarray
    stringinstr stringisalnum stringisalpha stringisascii stringisdigit
    stringisfloat stringisint stringislower stringisspace stringisupper
    stringisxdigit stringleft stringlen stringlower stringmid stringregexp
    stringregexpreplace stringreplace stringright stringsplit stringstripcr
    stringstripws stringtoasciiarray stringtobinary stringtrimleft
    stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
    tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
    timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
    trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
    trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
    traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
    udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
    winactivate winactive winclose winexists winflash wingetcaretpos
    wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
    wingetstate wingettext wingettitle winkill winlist winmenuselectitem
    winminimizeall winminimizeallundo winmove winsetontop winsetstate
    winsettitle winsettrans winwait winwaitactive winwaitclose
    winwaitnotactive""".split()

    macros = """\
    @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
    @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
    @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
    @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
    @exitcode @exitmethod @extended @favoritescommondir @favoritesdir
    @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
    @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
    @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
    @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
    @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
    @osversion @programfilesdir @programscommondir @programsdir @scriptdir
    @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
    @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
    @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
    @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
    @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
    @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
    @windowsdir @workingdir @yday @year""".split()

    tokens = {
        'root': [
            (r';.*\n', Comment.Single),
            (r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)',
             Comment.Multiline),
            (r'[\[\]{}(),;]', Punctuation),
            (r'(and|or|not)\b', Operator.Word),
            # Variables start with '$' and macros with '@'.  The original
            # character class was [$|@], which wrongly treated '|' as a
            # variable sigil (so "|foo" lexed as a variable and the '|'
            # operator was shadowed).
            (r'[$@][a-zA-Z_]\w*', Name.Variable),
            (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
            include('commands'),
            include('labels'),
            include('builtInFunctions'),
            include('builtInMacros'),
            (r'"', String, combined('stringescape', 'dqs')),
            (r"'", String, 'sqs'),
            include('numbers'),
            (r'[a-zA-Z_#@$][\w#@$]*', Name),
            (r'\\|\'', Text),
            (r'\`([,%`abfnrtv\-+;])', String.Escape),
            (r'_\n', Text),  # Line continuation
            include('garbage'),
        ],
        'commands': [
            (r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
             bygroups(Text, Name.Builtin)),
        ],
        'builtInFunctions': [
            (r'(?i)(%s)\b' % '|'.join(functions),
             Name.Function),
        ],
        # State renamed from the misspelled 'builtInMarcros' (internal name
        # only; referenced solely by the include() above).
        'builtInMacros': [
            (r'(?i)(%s)\b' % '|'.join(macros),
             Name.Variable.Global),
        ],
        'labels': [
            # sendkeys
            (r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'stringescape': [
            # "" is an escaped quote inside double-quoted strings.
            (r'\"\"|\`([,%`abfnrtv])', String.Escape),
        ],
        'strings': [
            (r'[^"\n]+', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings')
        ],
        'sqs': [
            # '' is an escaped quote inside single-quoted strings.
            (r'\'\'|\`([,%`abfnrtv])', String.Escape),
            (r"'", String, '#pop'),
            (r"[^'\n]+", String)
        ],
        'garbage': [
            # Swallow stray non-newline whitespace so 'root' never stalls.
            (r'[^\S\n]', Text),
        ],
    }
diff --git a/pygments/lexers/bare.py b/pygments/lexers/bare.py
new file mode 100644
index 0000000..a9de9ba
--- /dev/null
+++ b/pygments/lexers/bare.py
@@ -0,0 +1,102 @@
+"""
+ pygments.lexers.bare
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the BARE schema.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, bygroups
+from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
+
+__all__ = ['BareLexer']
+
+
class BareLexer(RegexLexer):
    """
    For BARE schema source.

    .. versionadded:: 2.7
    """
    name = 'BARE'
    url = 'https://baremessages.org'
    filenames = ['*.bare']
    aliases = ['bare']

    # BARE primitive types plus the structural keywords.
    keywords = [
        'type',
        'enum',
        'u8',
        'u16',
        'u32',
        'u64',
        'uint',
        'i8',
        'i16',
        'i32',
        'i64',
        'int',
        'f32',
        'f64',
        'bool',
        'void',
        'data',
        'string',
        'optional',
        'map',
    ]

    tokens = {
        'root': [
            # type Name { ... }  -> struct body
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\{)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'struct'),
            # type Name ( ... )  -> tagged union
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\()',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'union'),
            # type Name <other>  -> plain type alias
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)',
             bygroups(Keyword, Whitespace, Name, Whitespace), 'typedef'),
            # NOTE(review): the trailing group '(\s+\{)' includes the '{' but is
            # emitted as Whitespace -- confirm whether that was intended.
            (r'(enum)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'enum'),
            (r'#.*?$', Comment),
            (r'\s+', Whitespace),
        ],
        'struct': [
            # Nested braces push/pop this same state.
            (r'\{', Text, '#push'),
            (r'\}', Text, '#pop'),
            (r'([a-zA-Z0-9]+)(:)(\s*)',
             bygroups(Name.Attribute, Text, Whitespace), 'typedef'),
            (r'\s+', Whitespace),
        ],
        'union': [
            (r'\)', Text, '#pop'),
            (r'(\s*)(\|)(\s*)', bygroups(Whitespace, Text, Whitespace)),
            (r'[A-Z][a-zA-Z0-9]+', Name.Class),
            (words(keywords), Keyword),
            (r'\s+', Whitespace),
        ],
        'typedef': [
            (r'\[\]', Text),
            (r'#.*?$', Comment, '#pop'),
            # Fixed-length array: [N]
            (r'(\[)(\d+)(\])', bygroups(Text, Literal, Text)),
            (r'<|>', Text),
            (r'\(', Text, 'union'),
            # NOTE(review): '[a-z-A-Z0-9]' parses as a-z, literal '-', A-Z,
            # 0-9, so it also matches '-'; possibly a typo for '[a-zA-Z0-9]'
            # -- confirm against the BARE grammar before changing.
            (r'(\[)([a-z][a-z-A-Z0-9]+)(\])', bygroups(Text, Keyword, Text)),
            (r'(\[)([A-Z][a-z-A-Z0-9]+)(\])', bygroups(Text, Name.Class, Text)),
            (r'([A-Z][a-z-A-Z0-9]+)', Name.Class),
            (words(keywords), Keyword),
            # A bare newline ends the typedef.
            (r'\n', Text, '#pop'),
            (r'\{', Text, 'struct'),
            (r'\s+', Whitespace),
            (r'\d+', Literal),
        ],
        'enum': [
            (r'\{', Text, '#push'),
            (r'\}', Text, '#pop'),
            # Explicit value: NAME = 3
            (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)',
             bygroups(Name.Attribute, Text, Literal)),
            (r'([A-Z][A-Z0-9_]*)', bygroups(Name.Attribute)),
            (r'#.*?$', Comment),
            (r'\s+', Whitespace),
        ],
    }
diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py
new file mode 100644
index 0000000..e726e05
--- /dev/null
+++ b/pygments/lexers/basic.py
@@ -0,0 +1,665 @@
+"""
+ pygments.lexers.basic
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for BASIC like languages (other than VB.net).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, default, words, include
+from pygments.token import Comment, Error, Keyword, Name, Number, \
+ Punctuation, Operator, String, Text, Whitespace
+from pygments.lexers import _vbscript_builtins
+
+
+__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
+ 'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
+
+
class BlitzMaxLexer(RegexLexer):
    """
    For BlitzMax source code.

    .. versionadded:: 1.4
    """

    name = 'BlitzMax'
    url = 'http://blitzbasic.com'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']

    # Regex building blocks shared by the var/func rules below.
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'            # word-operators
    bmax_sktypes = r'@{1,2}|[!#$%]'                     # sigil type suffixes
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'  # spelled-out types
    bmax_name = r'[a-z_]\w*'
    # name, optionally followed by a sigil or ': Type', optionally 'Ptr'.
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
        (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    # A variable pattern followed by '(' is a function call/definition.
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            (r'(\.\.)(\n)', bygroups(Text, Whitespace)),  # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
             (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Whitespace, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
             (bmax_name, bmax_name),
             bygroups(Keyword.Reserved, Whitespace, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Whitespace, Keyword.Type,
                                 Operator, Whitespace, Punctuation, Whitespace,
                                 Keyword.Type, Name.Class, Whitespace,
                                 Keyword.Type, Whitespace, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Whitespace, Keyword.Type, Operator,
                                Whitespace, Punctuation, Whitespace, Keyword.Type,
                                Name.Class, Whitespace, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (words((
                'TNullMethodException', 'TNullFunctionException',
                'TNullObjectException', 'TArrayBoundsException',
                'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
            (words((
                'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
                'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
                'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
                'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
                'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
                'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
                'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
                'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
                'RestoreData'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        'string': [
            # "" is an escaped quote; a closing '"' may carry a C-string suffix.
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
+
+
class BlitzBasicLexer(RegexLexer):
    """
    For BlitzBasic source code.

    .. versionadded:: 2.0
    """

    name = 'BlitzBasic'
    url = 'http://blitzbasic.com'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']

    # Sigil type suffixes (@, @@, #, $, %) and the shared name/var patterns.
    bb_sktypes = r'@{1,2}|[#$%]'
    bb_name = r'[a-z]\w*'
    # name, optionally followed by a sigil or '.TypeName'.
    bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
        (bb_name, bb_sktypes, bb_name)

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
                    'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
                    'First', 'Last', 'Before', 'After'),
                   prefix=r'\b', suffix=r'\b'),
             Operator),
            (r'([+\-*/~=<>^])', Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
            # Identifiers
            (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Whitespace, Name.Label)),
            (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
             bygroups(Operator, Whitespace, Punctuation, Whitespace, Name.Class)),
            # A var pattern followed by '(' is a function reference.
            (r'\b%s\b([ \t]*)(\()' % bb_var,
             bygroups(Name.Function, Whitespace, Keyword.Type, Whitespace, Punctuation,
                      Whitespace, Name.Class, Whitespace, Punctuation)),
            (r'\b(Function)\b([ \t]+)%s' % bb_var,
             bygroups(Keyword.Reserved, Whitespace, Name.Function, Whitespace, Keyword.Type,
                      Whitespace, Punctuation, Whitespace, Name.Class)),
            (r'\b(Type)([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (words((
                'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
                'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
                'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Whitespace, Keyword.Type,
                              Whitespace, Punctuation, Whitespace, Name.Class)),
        ],
        'string': [
            # "" is an escaped quote; a closing '"' may carry a C-string suffix.
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"\n]+', String.Double),
        ],
    }
+
+
class MonkeyLexer(RegexLexer):
    """
    For
    `Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
    source code.

    .. versionadded:: 1.6
    """

    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']

    # Naming conventions used to classify bare identifiers.
    name_variable = r'[a-z_]\w*'
    name_function = r'[A-Z]\w*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z]\w*'
    name_module = r'[a-z0-9_]*'

    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'

    flags = re.MULTILINE

    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-fA-Z]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Native data types
            (r'\b%s\b' % keyword_type, Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace, Whitespace)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Whitespace), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Whitespace), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)(?=\s)', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[(){}!#,.:]', Punctuation),
            # catch the rest
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_function, Name.Function),
            (r'%s\b' % name_variable, Name.Variable),
        ],
        'funcname': [
            (r'(?i)%s\b' % name_function, Name.Function),
            # ':' introduces the return type.
            (r':', Punctuation, 'classname'),
            (r'\s+', Whitespace),
            (r'\(', Punctuation, 'variables'),
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (r'%s\.' % name_module, Name.Namespace),
            (r'%s\b' % keyword_type, Keyword.Type),
            (r'%s\b' % name_class, Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Whitespace, Number.Integer, Whitespace, Punctuation)),
            # generics
            (r'\s+(?!<)', Whitespace, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Whitespace, '#pop'),
            default('#pop')
        ],
        'variables': [
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_variable, Name.Variable),
            (r'%s' % keyword_type_special, Keyword.Type),
            (r'\s+', Whitespace),
            (r':', Punctuation, 'classname'),
            # ',' continues a declaration list.
            (r',', Punctuation, '#push'),
            default('#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            # '~' escape sequences (e.g. ~q is a quote).
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment': [
            # #rem blocks nest; #end pops one level.
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
+
+
class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.

    .. versionadded:: 1.6
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']

    # All CBM BASIC keywords are case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Whitespace),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            # Consumes the string body plus one more char (the closing quote
            # or, for unterminated strings, the end-of-line character).
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[(),:;]', Punctuation),
            # Names may carry a $ (string) or % (integer) sigil.
            (r'\w+[$%]?', Name),
        ]
    }

    def analyse_text(text):
        # if it starts with a line number, it shouldn't be a "modern" Basic
        # like VB.net
        # (implicitly returns None otherwise, which the guesser treats as 0)
        if re.match(r'^\d+', text):
            return 0.2
+
+
class QBasicLexer(RegexLexer):
    """
    For
    `QBasic <http://en.wikipedia.org/wiki/QBasic>`_
    source code.

    .. versionadded:: 2.0
    """

    name = 'QBasic'
    aliases = ['qbasic', 'basic']
    filenames = ['*.BAS', '*.bas']
    mimetypes = ['text/basic']

    # Keyword vocabulary, grouped by the token type each group is emitted as.
    # Each tuple is compiled into a single alternation in the states below.
    declarations = ('DATA', 'LET')

    functions = (
        'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
        'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
        'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
        'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
        'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
        'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
        'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
        'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
        'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
        'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
        'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
        'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
        'VARPTR$', 'VARSEG'
    )

    metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')

    operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')

    # Note: duplicate 'KEY' and 'PUT' entries removed (they added identical,
    # redundant alternatives to the compiled regex).
    statements = (
        'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
        'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
        'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
        'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
        'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
        'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
        'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
        'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY',
        'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
        'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
        'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
        'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
        'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
        'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
        'PSET', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
        'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
        'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
        'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
        'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
        'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
    )

    keywords = (
        'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
        'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
        'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
        'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
        'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
    )

    tokens = {
        'root': [
            (r'\n+', Text),
            (r'\s+', Text.Whitespace),
            # A numbered line whose sole content is a REM comment.
            (r'^(\s*)(\d*)(\s*)(REM .*)$',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
                      Comment.Single)),
            # Leading line number.
            (r'^(\s*)(\d+)(\s*)',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
            # A name on the left-hand side of an assignment.
            (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
            # Single-quote comment, to end of line.
            (r'(?=[^"]*)\'.*$', Comment.Single),
            (r'"[^\n"]*"', String.Double),
            (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
             bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
            (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name)),
            (r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name.Variable.Global)),
            (r'(DIM)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
            (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
             bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
                      Operator)),
            (r'(GOTO|GOSUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            (r'(SUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            include('declarations'),
            include('functions'),
            include('metacommands'),
            include('operators'),
            include('statements'),
            include('keywords'),
            # Typed variables (suffix gives the type: $ string, % int, ...).
            (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
            (r'[a-zA-Z_]\w*\:', Name.Label),
            # NOTE(review): '[@|#]' also matches a literal '|'; kept as-is.
            (r'\-?\d*\.\d+[@|#]?', Number.Float),
            (r'\-?\d+[@|#]', Number.Float),
            (r'\-?\d+#?', Number.Integer.Long),
            # (A second, identical rule emitting Number.Integer used to follow
            # here; it could never match and has been removed.)
            (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
            (r'[\[\]{}(),;]', Punctuation),
            (r'[\w]+', Name.Variable.Global),
        ],
        # can't use regular \b because of X$()
        'declarations': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
             Keyword.Declaration),
        ],
        'functions': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
             Keyword.Reserved),
        ],
        'metacommands': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
             Keyword.Constant),
        ],
        'operators': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
        ],
        'statements': [
            # Sort longest-first: Python's regex alternation takes the first
            # alternative that matches, so 'CALL' listed before 'CALL ABSOLUTE'
            # made every multi-word statement unreachable.
            (r'\b(%s)\b' % '|'.join(
                map(re.escape, sorted(statements, key=len, reverse=True))),
             Keyword.Reserved),
        ],
        'keywords': [
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
        ],
    }

    def analyse_text(text):
        # The $DYNAMIC / $STATIC metacommands are distinctive of QBasic.
        if '$DYNAMIC' in text or '$STATIC' in text:
            return 0.9
+
+
class VBScriptLexer(RegexLexer):
    """
    VBScript is scripting language that is modeled on Visual Basic.

    .. versionadded:: 2.4
    """
    name = 'VBScript'
    aliases = ['vbscript']
    filenames = ['*.vbs', '*.VBS']
    # VBScript is case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r"'[^\n]*", Comment.Single),
            (r'\s+', Whitespace),
            ('"', String.Double, 'string'),
            ('&h[0-9a-f]+', Number.Hex),
            # Float variant 1, for example: 1., 1.e2, 1.2e3
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
            (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),  # Float variant 2, for example: .1, .1e2
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),  # Float variant 3, for example: 123e45
            (r'[0-9]+', Number.Integer),
            ('#.+#', String),  # date or time value
            (r'(dim)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
            (r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (r'(class)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (r'(const)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
            (r'(end)(\s+)(class|function|if|property|sub|with)',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
            (r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
            (r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
            (r'rem\s.*[^\n]*', Comment.Single),
            (words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
            (words(_vbscript_builtins.OPERATORS), Operator),
            (words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            (words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
            (words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
            (words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
            # Line-continuation underscore.  Must precede the generic
            # identifier rule: '_' is a valid identifier by itself, so with
            # the old ordering the identifier rule consumed it first and this
            # rule was unreachable.
            (r'\b_\n', Operator),
            (r'[a-z_][a-z0-9_]*', Name),
            # Equivalent to the old words(r'(),.:') call, which only worked by
            # iterating the string character-by-character.
            (r'[(),.:]', Punctuation),
            (r'.+(\n)?', Error)
        ],
        'dim_more': [
            # Further comma-separated names in a Dim statement.  The
            # continuation class now includes '_' so names like 'my_var'
            # are matched whole (the old pattern stopped at the underscore).
            (r'(\s*)(,)(\s*)([a-z_][a-z0-9_]*)',
             bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
            default('#pop'),
        ],
        'string': [
            (r'[^"\n]+', String.Double),
            (r'\"\"', String.Double),  # doubled quote is an escaped quote
            (r'"', String.Double, '#pop'),
            (r'\n', Error, '#pop'),  # Unterminated string
        ],
    }
+
+
class BBCBasicLexer(RegexLexer):
    """
    BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
    It is also used by BBC Basic For Windows.

    .. versionadded:: 2.4
    """
    # Keywords common to all BBC BASIC versions: operators, pseudo-variables,
    # functions and statements.  NOTE(review): several entries appear twice
    # ('PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM') -- redundant but harmless.
    base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
                     'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
                     'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
                     'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
                     'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
                     'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
                     'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
                     'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
                     'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
                     'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
                     'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
                     'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
                     'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
                     'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
                     'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
                     'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
                     'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']

    # Keywords added in BASIC V (Acorn RISC OS), plus interactive commands.
    basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
                       'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
                       'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
                       'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
                       'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
                       'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
                       'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
                       'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']


    name = 'BBC Basic'
    aliases = ['bbcbasic']
    filenames = ['*.bbc']

    tokens = {
        'root': [
            # Leading line number.
            (r"[0-9]+", Name.Label),
            # '*command' passes the rest of the line to the OS CLI.
            (r"(\*)([^\n]*)",
             bygroups(Keyword.Pseudo, Comment.Special)),
            # Anything else is program text; hand over to 'code'.
            default('code'),
        ],

        'code': [
            (r"(REM)([^\n]*)",
             bygroups(Keyword.Declaration, Comment.Single)),
            # End of line: back to line-number handling.  NOTE(review): this
            # pushes 'root' rather than popping, so the state stack grows by
            # one per line; behaviorally equivalent, just unusual.
            (r'\n', Whitespace, 'root'),
            (r'\s+', Whitespace),
            # ':' separates statements on one line.
            (r':', Comment.Preproc),

            # Some special cases to make functions come out nicer
            (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
             bygroups(Keyword.Declaration, Whitespace,
                      Keyword.Declaration, Name.Function)),
            (r'(FN|PROC)([A-Za-z_@][\w@]*)',
             bygroups(Keyword, Name.Function)),

            # Jumps to a numbered line.
            (r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
             bygroups(Keyword, Whitespace, Name.Label)),

            (r'(TRUE|FALSE)', Keyword.Constant),
            # Pseudo-variables (readable/writable system values).
            (r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)',
             Keyword.Pseudo),

            # NOTE(review): words() without a \b suffix, so keywords also
            # match as prefixes of longer words -- presumably matching the
            # real BBC BASIC tokeniser's behavior; confirm before changing.
            (words(base_keywords), Keyword),
            (words(basic5_keywords), Keyword),

            ('"', String.Double, 'string'),

            # Binary (%…) and hexadecimal (&…) literals.
            ('%[01]{1,32}', Number.Bin),
            ('&[0-9a-f]{1,8}', Number.Hex),

            (r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
            (r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
            (r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
            (r'[+-]?\d+', Number.Integer),

            # Variables; '%' marks integer, '$' string variables.
            (r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
            (r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
        ],
        'string': [
            (r'[^"\n]+', String.Double),
            (r'"', String.Double, '#pop'),
            (r'\n', Error, 'root'),  # Unterminated string
        ],
    }

    def analyse_text(text):
        # Tokenised BBC BASIC saved from the editor conventionally starts
        # with a 'REM >filename' header (optionally after line number 10).
        if text.startswith('10REM >') or text.startswith('REM >'):
            return 0.9
diff --git a/pygments/lexers/bdd.py b/pygments/lexers/bdd.py
new file mode 100644
index 0000000..4dbe33e
--- /dev/null
+++ b/pygments/lexers/bdd.py
@@ -0,0 +1,58 @@
+"""
+ pygments.lexers.bdd
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for BDD(Behavior-driven development).
+ More information: https://en.wikipedia.org/wiki/Behavior-driven_development
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Comment, Keyword, Name, String, Number, Text, \
+ Punctuation, Whitespace
+
+__all__ = ['BddLexer']
+
class BddLexer(RegexLexer):
    """
    Lexer for BDD (Behavior-driven development) feature files.

    In addition to the Gherkin step keywords it highlights comments,
    punctuation, strings, numbers and ``<placeholder>`` variables.

    .. versionadded:: 2.11
    """

    name = 'Bdd'
    aliases = ['bdd']
    filenames = ['*.feature']
    mimetypes = ['text/x-bdd']

    # Gherkin step / section keywords, tried in this order ('Scenario
    # Outline' before 'Scenario' so the longer form wins).
    step_keywords = (r'Given|When|Then|Add|And|Feature|Scenario Outline|'
                     r'Scenario|Background|Examples|But')

    tokens = {
        'root': [
            (r'\n|\s+', Whitespace),
            (step_keywords, Keyword),
            # Whole-line comments.
            (r'^\s*#.*$', Comment),
            # Punctuation and table/outline delimiters.
            (r'(<|>|\[|\]|=|\||:|\(|\)|\{|\}|,|\.|;|-|_|\$)', Punctuation),
            # Scenario-outline placeholder between '<' and '>'.
            (r'((?<=\<)[^\\>]+(?=\>))', Name.Variable),
            (r'"([^\"]*)"', String),
            # Tags such as @smoke at the start of a line.
            (r'^@\S+', Name.Label),
            # Integer/decimal literals with optional exponent.
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number),
            # Any other word is plain text.
            (r'\S+', Text),
        ]
    }

    def analyse_text(self, text):
        # No content-based detection; rely on the *.feature filename.
        pass
diff --git a/pygments/lexers/berry.py b/pygments/lexers/berry.py
new file mode 100644
index 0000000..cbdbb16
--- /dev/null
+++ b/pygments/lexers/berry.py
@@ -0,0 +1,99 @@
+"""
+ pygments.lexers.berry
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Berry.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include, bygroups
+from pygments.token import Comment, Whitespace, Operator, Keyword, Name, \
+ String, Number, Punctuation
+
+__all__ = ['BerryLexer']
+
+
class BerryLexer(RegexLexer):
    """
    For `berry <http://github.com/berry-lang/berry>`_ source code.

    .. versionadded:: 2.12.0
    """
    name = 'Berry'
    aliases = ['berry', 'be']
    filenames = ['*.be']
    mimetypes = ['text/x-berry', 'application/x-berry']

    # An identifier: a word not starting with a digit.
    _name = r'\b[^\W\d]\w*'

    tokens = {
        'root': [
            include('whitespace'),
            include('numbers'),
            include('keywords'),
            # Declarations introduce function / class / namespace names.
            (rf'(def)(\s+)({_name})',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (rf'\b(class)(\s+)({_name})',
             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (rf'\b(import)(\s+)({_name})',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            include('expr')
        ],
        'expr': [
            (r'[^\S\n]+', Whitespace),
            (r'\.\.|[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            include('controls'),
            include('builtins'),
            include('funccall'),
            include('member'),
            include('name'),
            include('strings')
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            # Block comments: #- ... -#
            (r'#-(.|\n)*?-#', Comment.Multiline),
            (r'#.*?$', Comment.Single)
        ],
        'keywords': [
            (words((
                'as', 'break', 'continue', 'import', 'static', 'self', 'super'),
                suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(var|def)\b', Keyword.Declaration)
        ],
        'controls': [
            (words((
                'if', 'elif', 'else', 'for', 'while', 'do', 'end', 'break',
                'continue', 'return', 'try', 'except', 'raise'),
                suffix=r'\b'), Keyword)
        ],
        'builtins': [
            (words((
                'assert', 'bool', 'input', 'classname', 'classof', 'number', 'real',
                'bytes', 'compile', 'map', 'list', 'int', 'isinstance', 'print',
                'range', 'str', 'super', 'module', 'size', 'issubclass', 'open',
                'file', 'type', 'call'),
                suffix=r'\b'), Name.Builtin)
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            # Float rules must come before the plain-integer rule: with the
            # old order the integer rule matched the '3' of '3.14' first,
            # splitting the literal into Integer '3' + Float '.14', and
            # exponent-only floats like '1e5' were not recognized at all.
            (r'-?\d+\.\d*([eE][+-]?\d+)?', Number.Float),
            (r'\.\d+([eE][+-]?\d+)?', Number.Float),
            (r'-?\d+[eE][+-]?\d+', Number.Float),
            (r'-?\d+', Number.Integer)
        ],
        'name': [
            (_name, Name)
        ],
        'funccall': [
            # Identifier directly followed by '(' -- a call.
            (rf'{_name}(?=\s*\()', Name.Function, '#pop')
        ],
        'member': [
            # Attribute access after '.', when not a method call.
            (rf'(?<=\.){_name}\b(?!\()', Name.Attribute, '#pop')
        ],
        'strings': [
            (r'"([^\\]|\\.)*?"', String.Double, '#pop'),
            (r'\'([^\\]|\\.)*?\'', String.Single, '#pop')
        ]
    }
diff --git a/pygments/lexers/bibtex.py b/pygments/lexers/bibtex.py
new file mode 100644
index 0000000..cbc5a8e
--- /dev/null
+++ b/pygments/lexers/bibtex.py
@@ -0,0 +1,159 @@
+"""
+ pygments.lexers.bibtex
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for BibTeX bibliography data and styles
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, \
+ words
+from pygments.token import Name, Comment, String, Error, Number, Keyword, \
+ Punctuation, Whitespace
+
+__all__ = ['BibTeXLexer', 'BSTLexer']
+
+
class BibTeXLexer(ExtendedRegexLexer):
    """
    A lexer for BibTeX bibliography data format.

    .. versionadded:: 2.2
    """

    name = 'BibTeX'
    aliases = ['bibtex', 'bib']
    filenames = ['*.bib']
    mimetypes = ["text/x-bibtex"]
    flags = re.IGNORECASE

    # Punctuation characters permitted inside BibTeX identifiers.
    ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~'
    # An identifier may not start with a digit.
    IDENTIFIER = '[{}][{}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS)

    def open_brace_callback(self, match, ctx):
        """Emit the opening '{' or '(' and remember which one it was on the
        lexer context, so close_brace_callback can verify the pair matches."""
        opening_brace = match.group()
        ctx.opening_brace = opening_brace
        yield match.start(), Punctuation, opening_brace
        ctx.pos = match.end()

    def close_brace_callback(self, match, ctx):
        """Emit the closing brace, as an Error token if it does not pair with
        the recorded opener ('{'...'}' or '('...')')."""
        closing_brace = match.group()
        if (
            ctx.opening_brace == '{' and closing_brace != '}' or
            ctx.opening_brace == '(' and closing_brace != ')'
        ):
            yield match.start(), Error, closing_brace
        else:
            yield match.start(), Punctuation, closing_brace
        del ctx.opening_brace
        ctx.pos = match.end()

    tokens = {
        'root': [
            include('whitespace'),
            # @comment (negative lookahead keeps '@commentary' an entry type).
            (r'@comment(?!ary)', Comment),
            # @preamble{...} holds a single value; @string{name = value}.
            ('@preamble', Name.Class, ('closing-brace', 'value', 'opening-brace')),
            ('@string', Name.Class, ('closing-brace', 'field', 'opening-brace')),
            # Any other @entrytype starts a regular entry.
            ('@' + IDENTIFIER, Name.Class,
             ('closing-brace', 'command-body', 'opening-brace')),
            # Text outside any entry is ignored by BibTeX; treat as comment.
            ('.+', Comment),
        ],
        'opening-brace': [
            include('whitespace'),
            (r'[{(]', open_brace_callback, '#pop'),
        ],
        'closing-brace': [
            include('whitespace'),
            (r'[})]', close_brace_callback, '#pop'),
        ],
        'command-body': [
            include('whitespace'),
            # The citation key, then the comma-separated fields.
            (r'[^\s\,\}]+', Name.Label, ('#pop', 'fields')),
        ],
        'fields': [
            include('whitespace'),
            # Each ',' introduces another 'field = value' pair.
            (',', Punctuation, 'field'),
            default('#pop'),
        ],
        'field': [
            include('whitespace'),
            # Field name, then '=' and its value (states pushed in reverse).
            (IDENTIFIER, Name.Attribute, ('value', '=')),
            default('#pop'),
        ],
        '=': [
            include('whitespace'),
            ('=', Punctuation, '#pop'),
        ],
        'value': [
            include('whitespace'),
            # A bare identifier is a reference to an @string macro.
            (IDENTIFIER, Name.Variable),
            ('"', String, 'quoted-string'),
            (r'\{', String, 'braced-string'),
            (r'[\d]+', Number),
            # '#' concatenates value parts.
            ('#', Punctuation),
            default('#pop'),
        ],
        'quoted-string': [
            # Braces may nest inside a quoted string.
            (r'\{', String, 'braced-string'),
            ('"', String, '#pop'),
            (r'[^\{\"]+', String),
        ],
        'braced-string': [
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
            (r'[^\{\}]+', String),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
    }
+
+
class BSTLexer(RegexLexer):
    """
    A lexer for BibTeX bibliography styles.

    .. versionadded:: 2.2
    """

    name = 'BST'
    aliases = ['bst', 'bst-pybtex']
    filenames = ['*.bst']
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            # Commands with no arguments.
            (words(['read', 'sort']), Keyword),
            # Commands followed by one braced group, e.g. EXECUTE {...}.
            (words(['execute', 'integers', 'iterate', 'reverse', 'strings']),
             Keyword, ('group')),
            # FUNCTION {name} {body} and MACRO {name} {text}: two groups.
            (words(['function', 'macro']), Keyword, ('group', 'group')),
            # ENTRY takes three braced groups.
            (words(['entry']), Keyword, ('group', 'group', 'group')),
        ],
        'group': [
            include('whitespace'),
            # On '{', replace this state with 'group-end' (to consume the
            # matching '}') and 'body' (to lex the contents first).
            (r'\{', Punctuation, ('#pop', 'group-end', 'body')),
        ],
        'group-end': [
            include('whitespace'),
            (r'\}', Punctuation, '#pop'),
        ],
        'body': [
            include('whitespace'),
            # Quoted function reference, e.g. 'skip$.
            (r"\'[^#\"\{\}\s]+", Name.Function),
            # Built-in functions end in '$' (write$, add.period$, ...).
            (r'[^#\"\{\}\s]+\$', Name.Builtin),
            (r'[^#\"\{\}\s]+', Name.Variable),
            (r'"[^\"]*"', String),
            # Integer literals are written with a '#' prefix.
            (r'#-?\d+', Number),
            # Nested block inside a body.
            (r'\{', Punctuation, ('group-end', 'body')),
            default('#pop'),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            # '%' starts a comment running to end of line.
            ('%.*?$', Comment.Single),
        ],
    }
diff --git a/pygments/lexers/boa.py b/pygments/lexers/boa.py
new file mode 100644
index 0000000..96e34a9
--- /dev/null
+++ b/pygments/lexers/boa.py
@@ -0,0 +1,97 @@
+"""
+ pygments.lexers.boa
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Boa language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import String, Comment, Keyword, Name, Number, Operator, \
+ Punctuation, Whitespace
+
+__all__ = ['BoaLexer']
+
+
class BoaLexer(RegexLexer):
    """
    Lexer for the `Boa <http://boa.cs.iastate.edu/docs/>`_ language.

    .. versionadded:: 2.4
    """
    name = 'Boa'
    aliases = ['boa']
    filenames = ['*.boa']

    # Control-flow / declaration keywords.
    reserved = words(
        ('input', 'output', 'of', 'weight', 'before', 'after', 'stop',
         'ifall', 'foreach', 'exists', 'function', 'break', 'switch', 'case',
         'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
        suffix=r'\b', prefix=r'\b')
    # Primitive types and output aggregators.
    keywords = words(
        ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum',
         'top', 'string', 'int', 'bool', 'float', 'time', 'false', 'true',
         'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
    # Domain-model types of Boa's software-mining DSL.
    classes = words(
        ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind',
         'ChangedFile', 'FileKind', 'ASTRoot', 'Namespace', 'Declaration', 'Type',
         'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
         'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility',
         'TypeKind', 'Person', 'ChangeKind'),
        suffix=r'\b', prefix=r'\b')
    operators = ('->', ':=', ':', '=', '<<', '!', '++', '||',
                 '&&', '+', '-', '*', ">", "<")
    # String delimiters, matched on their own only for unpaired occurrences
    # (the full string rules below match complete literals first).
    string_sep = ('`', '\"')
    built_in_functions = words(
        (
            # Array functions
            'new', 'sort',
            # Date & Time functions
            'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now',
            'addday', 'addmonth', 'addweek', 'addyear', 'dayofmonth', 'dayofweek',
            'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
            'trunctomonth', 'trunctosecond', 'trunctoyear',
            # Map functions
            'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
            # Math functions
            'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
            'ceil', 'cos', 'cosh', 'exp', 'floor', 'highbit', 'isfinite', 'isinf',
            'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow',
            'rand', 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
            # Other functions
            'def', 'hash', 'len',
            # Set functions
            'add', 'contains', 'remove',
            # String functions
            'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex',
            'split', 'splitall', 'splitn', 'strfind', 'strreplace', 'strrfind',
            'substring', 'trim', 'uppercase',
            # Type Conversion functions
            'bool', 'float', 'int', 'string', 'time',
            # Domain-Specific functions
            'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind',
            'isliteral',
        ),
        prefix=r'\b',
        # The opening '(' is part of the match, so a built-in is only
        # recognized when directly called.
        suffix=r'\(')

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # NOTE(review): without re.DOTALL, '.' does not cross newlines,
            # so only block comments opened and closed on one line match.
            (r'/\*.*?\*/', Comment.Multiline),
            (reserved, Keyword.Reserved),
            (built_in_functions, Name.Function),
            (keywords, Keyword.Type),
            # NOTE(review): 'Name.Classes' is a non-standard token name
            # (Pygments auto-creates it); 'Name.Class' was likely intended.
            (classes, Name.Classes),
            (words(operators), Operator),
            (r'[][(),;{}\\.]', Punctuation),
            # Double-quoted and backtick strings with backslash escapes.
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"`(\\\\|\\[^\\]|[^`\\])*`", String.Backtick),
            (words(string_sep), String.Delimiter),
            (r'[a-zA-Z_]+', Name.Variable),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),  # Whitespace
        ]
    }
diff --git a/pygments/lexers/business.py b/pygments/lexers/business.py
new file mode 100644
index 0000000..bffcd8a
--- /dev/null
+++ b/pygments/lexers/business.py
@@ -0,0 +1,626 @@
+"""
+ pygments.lexers.business
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for "business-oriented" languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error, Whitespace
+
+from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS
+
+__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer',
+ 'GoodDataCLLexer', 'MaqlLexer']
+
+
class CobolLexer(RegexLexer):
    """
    Lexer for OpenCOBOL code.

    Fixed-format COBOL: columns 1-6 are the sequence area and column 7 is
    the indicator area, which is why the 'comment' state eats ``^.{6}``.

    .. versionadded:: 1.6
    """
    name = 'COBOL'
    aliases = ['cobol']
    filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
    mimetypes = ['text/x-cobol']
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: by PICTURE and USAGE
    # Operators: **, *, +, -, /, <, >, <=, >=, =, <>
    # Logical (?): NOT, AND, OR

    # Reserved words:
    # http://opencobol.add1tocobol.com/#reserved-words
    # Intrinsics:
    # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions

    tokens = {
        'root': [
            include('comment'),
            include('strings'),
            include('core'),
            include('nums'),
            (r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable),
            # (r'[\s]+', Text),
            (r'[ \t]+', Whitespace),
        ],
        'comment': [
            # Indicator-area '*' or '/' marks a comment line; '*>' is an
            # inline comment.
            (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
        ],
        'core': [
            # Figurative constants
            (r'(^|(?<=[^\w\-]))(ALL\s+)?'
             r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
             r'\s*($|(?=[^\w\-]))',
             Name.Constant),

            # Reserved words STATEMENTS and other bolds
            (words((
                'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE',
                'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE',
                'DIVISION', 'ELSE', 'END', 'END-ACCEPT',
                'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY',
                'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE',
                'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH',
                'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE',
                'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER',
                'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE',
                'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE',
                'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM',
                'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME',
                'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET',
                'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS',
                'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE',
                'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^\w\-]))',
                suffix=r'\s*($|(?=[^\w\-]))'),
             Keyword.Reserved),

            # Reserved words
            (words((
                'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL',
                'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER',
                # FIX: a trailing comma was missing after 'ALTERNATE', which
                # made Python fuse 'ALTERNATE' and 'ANY' into the single word
                # 'ALTERNATEANY', so neither keyword was ever highlighted.
                'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE',
                'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS',
                'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC',
                'AUTOTERMINATE', 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
                'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING',
                'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL',
                'COLLATING', 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE',
                'COMMIT', 'COMMON', 'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
                'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT',
                'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE',
                'DEBUGGING', 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
                'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK',
                'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC',
                'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP',
                'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION',
                'EXCLUSIVE', 'EXTEND', 'EXTERNAL', 'FILE-ID', 'FILLER', 'FINAL',
                'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
                'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL',
                'FUNCTION', 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
                'HEADING', 'HIGHLIGHT', 'I-O', 'ID',
                'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE',
                'INITIAL', 'INITIALIZED', 'INPUT', 'INTO', 'INTRINSIC', 'INVALID',
                'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
                'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE',
                'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK',
                'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE', 'MULTIPLE',
                'NATIONAL', 'NATIONAL-EDITED', 'NATIVE', 'NEGATIVE', 'NEXT', 'NO',
                'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC', 'NUMERIC-EDITED',
                'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
                'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW',
                'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH',
                'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS',
                'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES',
                'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE',
                'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE',
                'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL',
                'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY',
                'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND',
                'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL',
                'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE',
                'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT',
                'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE',
                'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD',
                'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUBKEY', 'SUM',
                'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE',
                'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING',
                'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED',
                'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP',
                'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING',
                'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'),
                prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
             Keyword.Pseudo),

            # inactive reserved words
            (words((
                'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE',
                'B-AND', 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER',
                'CF', 'CH', 'CHAIN', 'CLASS-ID', 'CLASSIFICATION', 'COMMUNICATION',
                'CONDITION', 'DATA-POINTER', 'DESTINATION', 'DISABLE', 'EC', 'EGI',
                'EMI', 'ENABLE', 'END-RECEIVE', 'ENTRY-CONVENTION', 'EO', 'ESI',
                'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY', 'FLOAT-BINARY-16',
                'FLOAT-BINARY-34', 'FLOAT-BINARY-7', 'FLOAT-DECIMAL-16',
                'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT', 'FUNCTION-POINTER',
                'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY', 'INHERITS',
                'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
                'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME',
                'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE',
                'NORMAL', 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE',
                'PAGE-COUNTER', 'PF', 'PH', 'PROPERTY', 'PROTOTYPE', 'PURGE',
                'QUEUE', 'RAISE', 'RAISING', 'RECEIVE', 'RELATION', 'REPLACE',
                'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY', 'RF', 'RH',
                'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT',
                'STEP', 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3',
                'SUPER', 'SYMBOL', 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT',
                'TYPEDEF', 'UCS-4', 'UNIVERSAL', 'USER-DEFAULT', 'UTF-16', 'UTF-8',
                'VAL-STATUS', 'VALID', 'VALIDATE', 'VALIDATE-STATUS'),
                prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
             Error),

            # Data Types
            (r'(^|(?<=[^\w\-]))'
             r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
             r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
             r'BINARY-C-LONG|'
             r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
             r'BINARY)\s*($|(?=[^\w\-]))', Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),

            # (r'(::)', Keyword.Declaration),

            (r'([(),;:&%.])', Punctuation),

            # Intrinsics
            (r'(^|(?<=[^\w\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
             r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
             r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
             r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
             r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
             r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|'
             r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
             r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
             r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
             r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
             r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
             r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
             r'($|(?=[^\w\-]))', Name.Function),

            # Booleans
            (r'(^|(?<=[^\w\-]))(true|false)\s*($|(?=[^\w\-]))', Name.Builtin),
            # Comparing Operators
            (r'(^|(?<=[^\w\-]))(equal|equals|ne|lt|le|gt|ge|'
             r'greater|less|than|not|and|or)\s*($|(?=[^\w\-]))', Operator.Word),
        ],

        # \"[^\"\n]*\"|\'[^\'\n]*\'
        'strings': [
            # apparently strings can be delimited by EOL if they are continued
            # in the next line
            (r'"[^"\n]*("|\n)', String.Double),
            (r"'[^'\n]*('|\n)", String.Single),
        ],

        'nums': [
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
        ],
    }
+
+
class CobolFreeformatLexer(CobolLexer):
    """
    Lexer for Free format OpenCOBOL code.

    .. versionadded:: 1.6
    """
    name = 'COBOLFree'
    aliases = ['cobolfree']
    mimetypes = []
    filenames = ['*.cbl', '*.CBL']
    flags = re.IGNORECASE | re.MULTILINE

    # Everything is inherited from CobolLexer; free-format sources have no
    # fixed sequence/indicator columns, so only the 'comment' state needs
    # replacing: '*>' inline comments, or lines whose first non-word
    # character is '*'.
    tokens = {
        'comment': [(r'(\*>.*\n|^\w*\*.*$)', Comment)],
    }
+
+
class ABAPLexer(RegexLexer):
    """
    Lexer for ABAP, SAP's integrated language.

    .. versionadded:: 1.1
    """
    name = 'ABAP'
    aliases = ['abap']
    filenames = ['*.abap', '*.ABAP']
    mimetypes = ['text/x-abap']

    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        # Whitespace and comment forms shared by several contexts:
        # full-line '*' comments, inline '"' comments, and '##' pragmas.
        'common': [
            (r'\s+', Whitespace),
            (r'^\*.*$', Comment.Single),
            (r'\".*?\n', Comment.Single),
            (r'##\w+', Comment.Special),
        ],
        # Field symbols (<fs>) and plain identifiers, incl. the
        # table-body ([]) and dereference (->*) suffixes.
        'variable-names': [
            (r'<\S+>', Name.Variable),
            (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable),
        ],
        'root': [
            include('common'),
            # function calls
            (r'CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION)',
             Keyword),
            (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
             r'TRANSACTION|TRANSFORMATION))\b',
             Keyword),
            (r'(FORM|PERFORM)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(PERFORM)(\s+)(\()(\w+)(\))',
             bygroups(Keyword, Whitespace, Punctuation, Name.Variable, Punctuation)),
            (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Keyword)),

            # method implementation
            (r'(METHOD)(\s+)([\w~]+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            # method calls
            (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)',
             bygroups(Whitespace, Name.Variable, Operator, Name.Function)),
            # call methodnames returning style
            (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function),

            # text elements
            (r'(TEXT)(-)(\d{3})',
             bygroups(Keyword, Punctuation, Number.Integer)),
            (r'(TEXT)(-)(\w{3})',
             bygroups(Keyword, Punctuation, Name.Variable)),

            # keywords with dashes in them.
            # these need to be first, because for instance the -ID part
            # of MESSAGE-ID wouldn't get highlighted if MESSAGE was
            # first in the list of keywords.
            (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
             r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
             r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
             r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
             r'FIELD-GROUPS|FIELD-SYMBOLS|FIELD-SYMBOL|FUNCTION-POOL|'
             r'INTERFACE-POOL|INVERTED-DATE|'
             r'LOAD-OF-PROGRAM|LOG-POINT|'
             r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
             r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
             r'OUTPUT-LENGTH|PRINT-CONTROL|'
             r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
             r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
             r'TYPE-POOL|TYPE-POOLS|NO-DISPLAY'
             r')\b', Keyword),

            # keyword kombinations
            (r'(?<![-\>])(CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
             r'(PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
             r'(TYPE|LIKE)\s+((LINE\s+OF|REF\s+TO|'
             r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
             r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
             r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
             r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
             r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
             r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
             r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
             r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
             r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
             r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
             r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|'
             r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
             r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
             r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
             r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
             r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
             r'FREE\s(MEMORY|OBJECT)?|'
             r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
             r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
             r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
             r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
             r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
             r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
             r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
             r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
             r'SKIP|ULINE)|'
             r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
             r'TO LIST-PROCESSING|TO TRANSACTION)'
             r'(ENDING|STARTING)\s+AT|'
             r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
             r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
             r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
             r'(BEGIN|END)\s+OF|'
             r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
             r'COMPARING(\s+ALL\s+FIELDS)?|'
             r'(INSERT|APPEND)(\s+INITIAL\s+LINE\s+(IN)?TO|\s+LINES\s+OF)?|'
             r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
             r'END-OF-(DEFINITION|PAGE|SELECTION)|'
             r'WITH\s+FRAME(\s+TITLE)|'
             r'(REPLACE|FIND)\s+((FIRST|ALL)\s+OCCURRENCES?\s+OF\s+)?(SUBSTRING|REGEX)?|'
             r'MATCH\s+(LENGTH|COUNT|LINE|OFFSET)|'
             r'(RESPECTING|IGNORING)\s+CASE|'
             r'IN\s+UPDATE\s+TASK|'
             r'(SOURCE|RESULT)\s+(XML)?|'
             r'REFERENCE\s+INTO|'

             # simple kombinations
             r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
             r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
             r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
             r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
             r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
             r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
             r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE|COMMON\s+PART)\b', Keyword),

            # single word keywords.
            (r'(^|(?<=(\s|\.)))(ABBREVIATED|ABSTRACT|ADD|ALIASES|ALIGN|ALPHA|'
             r'ASSERT|AS|ASSIGN(ING)?|AT(\s+FIRST)?|'
             r'BACK|BLOCK|BREAK-POINT|'
             r'CASE|CAST|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|COND|CONV|'
             r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
             r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|COUNTRY|CURRENCY|'
             r'DATA|DATE|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
             r'DETAIL|DIRECTORY|DIVIDE|DO|DUMMY|'
             r'ELSE(IF)?|ENDAT|ENDCASE|ENDCATCH|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
             r'ENDIF|ENDINTERFACE|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|ENDWHILE|'
             r'ENHANCEMENT|EVENTS|EXACT|EXCEPTIONS?|EXIT|EXPONENT|EXPORT|EXPORTING|EXTRACT|'
             r'FETCH|FIELDS?|FOR|FORM|FORMAT|FREE|FROM|FUNCTION|'
             r'HIDE|'
             r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
             r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
             r'LANGUAGE|LEAVE|LENGTH|LINES|LOAD|LOCAL|'
             r'JOIN|'
             r'KEY|'
             r'NEW|NEXT|'
             r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFIER|MODIFY|MOVE|MULTIPLY|'
             r'NODES|NUMBER|'
             r'OBLIGATORY|OBJECT|OF|OFF|ON|OTHERS|OVERLAY|'
             r'PACK|PAD|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|PF\d\d|'
             r'RAISE|RAISING|RANGES?|READ|RECEIVE|REDEFINITION|REFRESH|REJECT|REPORT|RESERVE|'
             r'REF|RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|REPLACE|'
             r'SCROLL|SEARCH|SELECT|SHIFT|SIGN|SINGLE|SIZE|SKIP|SORT|SPLIT|STATICS|STOP|'
             r'STYLE|SUBMATCHES|SUBMIT|SUBTRACT|SUM(?!\()|SUMMARY|SUMMING|SUPPLY|SWITCH|'
             r'TABLE|TABLES|TIMESTAMP|TIMES?|TIMEZONE|TITLE|\??TO|'
             r'TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
             r'ULINE|UNDER|UNPACK|UPDATE|USING|'
             r'VALUE|VALUES|VIA|VARYING|VARY|'
             r'WAIT|WHEN|WHERE|WIDTH|WHILE|WITH|WINDOW|WRITE|XSD|ZERO)\b', Keyword),

            # builtins
            (r'(abs|acos|asin|atan|'
             r'boolc|boolx|bit_set|'
             r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
             r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
             r'count|count_any_of|count_any_not_of|'
             r'dbmaxlen|distance|'
             r'escape|exp|'
             r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
             r'insert|'
             r'lines|log|log10|'
             r'match|matches|'
             r'nmax|nmin|numofchar|'
             r'repeat|replace|rescale|reverse|round|'
             r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
             r'substring|substring_after|substring_from|substring_before|substring_to|'
             r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
             r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),

            # &1..&9 message placeholders
            (r'&[0-9]', Name),
            (r'[0-9]+', Number.Integer),

            # operators which look like variable names before
            # parsing variable names.
            # NOTE(review): the lookbehind (\s|.) succeeds after *any*
            # character; presumably (\s|\.) was intended — confirm upstream
            # before changing, as it affects which comparisons match.
            (r'(?<=(\s|.))(AND|OR|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
             r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
             r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator.Word),

            include('variable-names'),

            # standard operators after variable names,
            # because < and > are part of field symbols.
            (r'[?*<>=\-+&]', Operator),
            (r"'(''|[^'])*'", String.Single),
            (r"`([^`])*`", String.Single),
            # string templates: |...{ expr }...|
            (r"([|}])([^{}|]*?)([|{])",
             bygroups(Punctuation, String.Single, Punctuation)),
            (r'[/;:()\[\],.]', Punctuation),
            (r'(!)(\w+)', bygroups(Operator, Name)),
        ],
    }
+
+
class OpenEdgeLexer(RegexLexer):
    """
    Lexer for `OpenEdge ABL (formerly Progress)
    <http://web.progress.com/en/openedge/abl.html>`_ source code.

    .. versionadded:: 1.5
    """
    name = 'OpenEdge ABL'
    aliases = ['openedge', 'abl', 'progress']
    filenames = ['*.p', '*.cls']
    mimetypes = ['text/x-openedge', 'application/x-openedge']

    # Built-in data types, including ABL's abbreviated spellings
    # (e.g. CHAR through CHARACTE for CHARACTER).
    types = (r'(?i)(^|(?<=[^\w\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
             r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
             r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
             r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
             r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^\w\-]))')

    keywords = words(OPENEDGEKEYWORDS,
                     prefix=r'(?i)(^|(?<=[^\w\-]))',
                     suffix=r'\s*($|(?=[^\w\-]))')

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            (r'\{', Comment.Preproc, 'preprocessor'),
            (r'\s*&.*', Comment.Preproc),
            (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration),
            (types, Keyword.Type),
            (keywords, Name.Builtin),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),
            (r'[+*/=-]', Operator),
            (r'[.:()]', Punctuation),
            # Fallback: consume anything else one character at a time.
            (r'.', Name.Variable),
        ],
        # Comments may nest, hence the #push/#pop pair.
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        # Preprocessor references ({...}) may nest as well.
        'preprocessor': [
            (r'[^{}]', Comment.Preproc),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
        ],
    }

    def analyse_text(text):
        """Try to identify OpenEdge ABL based on a few common constructs."""
        # 0.05 per marker found; order matches the original checks.
        markers = ('END.', 'END PROCEDURE.', 'ELSE DO:')
        return sum(0.05 for marker in markers if marker in text)
+
+
class GoodDataCLLexer(RegexLexer):
    """
    Lexer for `GoodData-CL
    <https://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/\
com/gooddata/processor/COMMANDS.txt>`_
    script files.

    A script is a sequence of ``Command(arg=value, ...);`` calls.

    .. versionadded:: 1.4
    """

    name = 'GoodData-CL'
    aliases = ['gooddata-cl']
    filenames = ['*.gdc']
    mimetypes = ['text/x-gooddata-cl']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            # Comments
            (r'#.*', Comment.Single),
            # Function call
            (r'[a-z]\w*', Name.Function),
            # Argument list
            (r'\(', Punctuation, 'args-list'),
            # Punctuation
            (r';', Punctuation),
            # Space is not significant
            # FIX: was Text — use Whitespace, consistent with 'args-list'
            # below and the rest of this module.
            (r'\s+', Whitespace)
        ],
        'args-list': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'[a-z]\w*', Name.Variable),
            (r'=', Operator),
            (r'"', String, 'string-literal'),
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # Space is not significant
            (r'\s', Whitespace)
        ],
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ]
    }
+
+
class MaqlLexer(RegexLexer):
    """
    Lexer for `GoodData MAQL
    <https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_
    scripts.

    .. versionadded:: 1.4
    """

    name = 'MAQL'
    aliases = ['maql']
    filenames = ['*.maql']
    mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            # IDENTITY — matches the literal keyword "IDENTIFIER"
            (r'IDENTIFIER\b', Name.Builtin),
            # IDENTIFIER — a curly-brace-delimited object identifier
            (r'\{[^}]+\}', Name.Variable),
            # NUMBER
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # STRING
            (r'"', String, 'string-literal'),
            # RELATION
            (r'\<\>|\!\=', Operator),
            (r'\=|\>\=|\>|\<\=|\<', Operator),
            # := (assignment)
            (r'\:\=', Operator),
            # OBJECT — a square-bracket-delimited object reference
            (r'\[[^]]+\]', Name.Variable.Class),
            # keywords
            (words((
                'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER',
                'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT',
                'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN',
                'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND',
                'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE',
                'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON',
                'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL',
                'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP',
                'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE',
                'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER',
                'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'),
                suffix=r'\b'),
             Keyword),
            # FUNCNAME — any remaining bare word is assumed to be a function
            (r'[a-z]\w*\b', Name.Function),
            # Comments
            (r'#.*', Comment.Single),
            # Punctuation
            (r'[,;()]', Punctuation),
            # Space is not significant
            (r'\s+', Whitespace)
        ],
        # Double-quoted string bodies with backslash escapes.
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ],
    }
diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py
new file mode 100644
index 0000000..65f3c4b
--- /dev/null
+++ b/pygments/lexers/c_cpp.py
@@ -0,0 +1,409 @@
+"""
+ pygments.lexers.c_cpp
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for C/C++ languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, \
+ this, inherit, default, words
+from pygments.util import get_bool_opt
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['CLexer', 'CppLexer']
+
+
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.
    """

    # The trailing ?, rather than *, avoids a geometric performance drop here.
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)?'

    # Hexadecimal part in an hexadecimal integer/floating-point literal.
    # This includes decimal separators matching.
    _hexpart = r'[0-9a-fA-F](\'?[0-9a-fA-F])*'
    # Decimal part in an decimal integer/floating-point literal.
    # This includes decimal separators matching.
    _decpart = r'\d(\'?\d)*'
    # Integer literal suffix (e.g. 'ull' or 'll').
    _intsuffix = r'(([uU][lL]{0,2})|[lL]{1,2}[uU]?)?'

    # Identifier regex with C and C++ Universal Character Name (UCN) support.
    _ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8})+'
    _namespaced_ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|::)+'

    # Single and multiline comment regexes
    # Beware not to use *? for the inner content! When these regexes
    # are embedded in larger regexes, that can cause the stuff*? to
    # match more than it would have if the regex had been used in
    # a standalone way ...
    _comment_single = r'//(?:.|(?<=\\)\n)*\n'
    _comment_multiline = r'/(?:\\\n)?[*](?:[^*]|[*](?!(?:\\\n)?/))*[*](?:\\\n)?/'

    # Regex to match optional comments
    _possible_comments = rf'\s*(?:(?:(?:{_comment_single})|(?:{_comment_multiline}))\s*)*'

    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            (r'^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            # Labels:
            # Line start and possible indentation.
            (r'(^[ \t]*)'
             # Not followed by keywords which can be mistaken as labels.
             r'(?!(?:public|private|protected|default)\b)'
             # Actual label, followed by a single colon.
             r'(' + _ident + r')(\s*)(:)(?!:)',
             bygroups(Whitespace, Name.Label, Whitespace, Punctuation)),
            (r'\n', Whitespace),
            (r'[^\S\n]+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (_comment_single, Comment.Single),
            (_comment_multiline, Comment.Multiline),
            # Open until EOF, so no ending delimiter
            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
        ],
        'statements': [
            include('keywords'),
            include('types'),
            # String and character literals with optional encoding prefix.
            (r'([LuU]|u8)?(")', bygroups(String.Affix, String), 'string'),
            (r"([LuU]|u8)?(')(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])(')",
             bygroups(String.Affix, String.Char, String.Char, String.Char)),

            # Hexadecimal floating-point literals (C11, C++17)
            (r'0[xX](' + _hexpart + r'\.' + _hexpart + r'|\.' + _hexpart +
             r'|' + _hexpart + r')[pP][+-]?' + _hexpart + r'[lL]?', Number.Float),

            (r'(-)?(' + _decpart + r'\.' + _decpart + r'|\.' + _decpart + r'|' +
             _decpart + r')[eE][+-]?' + _decpart + r'[fFlL]?', Number.Float),
            (r'(-)?((' + _decpart + r'\.(' + _decpart + r')?|\.' +
             _decpart + r')[fFlL]?)|(' + _decpart + r'[fFlL])', Number.Float),
            (r'(-)?0[xX]' + _hexpart + _intsuffix, Number.Hex),
            (r'(-)?0[bB][01](\'?[01])*' + _intsuffix, Number.Bin),
            (r'(-)?0(\'?[0-7])+' + _intsuffix, Number.Oct),
            (r'(-)?' + _decpart + _intsuffix, Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'(true|false|NULL)\b', Name.Builtin),
            (_ident, Name)
        ],
        'types': [
            (words(('int8', 'int16', 'int32', 'int64', 'wchar_t'), prefix=r'__',
                   suffix=r'\b'), Keyword.Reserved),
            (words(('bool', 'int', 'long', 'float', 'short', 'double', 'char',
                    'unsigned', 'signed', 'void'), suffix=r'\b'), Keyword.Type)
        ],
        'keywords': [
            (r'(struct|union)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (r'case\b', Keyword, 'case-value'),
            (words(('asm', 'auto', 'break', 'const', 'continue', 'default',
                    'do', 'else', 'enum', 'extern', 'for', 'goto', 'if',
                    'register', 'restricted', 'return', 'sizeof', 'struct',
                    'static', 'switch', 'typedef', 'volatile', 'while', 'union',
                    'thread_local', 'alignas', 'alignof', 'static_assert', '_Pragma'),
                   suffix=r'\b'), Keyword),
            (words(('inline', '_inline', '__inline', 'naked', 'restrict',
                    'thread'), suffix=r'\b'), Keyword.Reserved),
            # Vector intrinsics
            (r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
            # Microsoft-isms
            (words((
                'asm', 'based', 'except', 'stdcall', 'cdecl',
                'fastcall', 'declspec', 'finally', 'try',
                'leave', 'w64', 'unaligned', 'raise', 'noop',
                'identifier', 'forceinline', 'assume'),
                prefix=r'__', suffix=r'\b'), Keyword.Reserved)
        ],
        'root': [
            include('whitespace'),
            include('keywords'),
            # functions
            (r'(' + _namespaced_ident + r'(?:[&*\s])+)'  # return arguments
             r'(' + _possible_comments + r')'
             r'(' + _namespaced_ident + r')'  # method name
             r'(' + _possible_comments + r')'
             r'(\([^;"\')]*?\))'  # signature
             r'(' + _possible_comments + r')'
             r'([^;{/"\']*)(\{)',
             bygroups(using(this), using(this, state='whitespace'),
                      Name.Function, using(this, state='whitespace'),
                      using(this), using(this, state='whitespace'),
                      using(this), Punctuation),
             'function'),
            # function declarations
            (r'(' + _namespaced_ident + r'(?:[&*\s])+)'  # return arguments
             r'(' + _possible_comments + r')'
             r'(' + _namespaced_ident + r')'  # method name
             r'(' + _possible_comments + r')'
             r'(\([^;"\')]*?\))'  # signature
             r'(' + _possible_comments + r')'
             r'([^;/"\']*)(;)',
             bygroups(using(this), using(this, state='whitespace'),
                      Name.Function, using(this, state='whitespace'),
                      using(this), using(this, state='whitespace'),
                      using(this), Punctuation)),
            include('types'),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            (r'\}', Punctuation),
            (r'[{;]', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'('+_ws1+r')(include)('+_ws1+r')("[^"]+")([^\n]*)',
             bygroups(using(this), Comment.Preproc, using(this),
                      Comment.PreprocFile, Comment.Single)),
            (r'('+_ws1+r')(include)('+_ws1+r')(<[^>]+>)([^\n]*)',
             bygroups(using(this), Comment.Preproc, using(this),
                      Comment.PreprocFile, Comment.Single)),
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        # Body of an '#if 0' block; nested #if pushes, #else/#elif/#endif pop.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'classname': [
            (_ident, Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
            default('#pop')
        ],
        # Mark identifiers preceded by `case` keyword as constants.
        'case-value': [
            (r'(?<!:)(:)(?!:)', Punctuation, '#pop'),
            (_ident, Name.Constant),
            include('whitespace'),
            include('statements'),
        ]
    }

    # Type names recognized when the corresponding highlighting option
    # (see __init__) is enabled; checked in get_tokens_unprocessed.
    stdlib_types = {
        'size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t', 'sig_atomic_t', 'fpos_t',
        'clock_t', 'time_t', 'va_list', 'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t',
        'mbstate_t', 'wctrans_t', 'wint_t', 'wctype_t'}
    c99_types = {
        'int8_t', 'int16_t', 'int32_t', 'int64_t', 'uint8_t',
        'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t', 'int_least16_t',
        'int_least32_t', 'int_least64_t', 'uint_least8_t', 'uint_least16_t',
        'uint_least32_t', 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
        'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t', 'uint_fast64_t',
        'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t'}
    linux_types = {
        'clockid_t', 'cpu_set_t', 'cpumask_t', 'dev_t', 'gid_t', 'id_t', 'ino_t', 'key_t',
        'mode_t', 'nfds_t', 'pid_t', 'rlim_t', 'sig_t', 'sighandler_t', 'siginfo_t',
        'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t'}
    c11_atomic_types = {
        'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
        'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
        'atomic_llong', 'atomic_ullong', 'atomic_char16_t', 'atomic_char32_t', 'atomic_wchar_t',
        'atomic_int_least8_t', 'atomic_uint_least8_t', 'atomic_int_least16_t',
        'atomic_uint_least16_t', 'atomic_int_least32_t', 'atomic_uint_least32_t',
        'atomic_int_least64_t', 'atomic_uint_least64_t', 'atomic_int_fast8_t',
        'atomic_uint_fast8_t', 'atomic_int_fast16_t', 'atomic_uint_fast16_t',
        'atomic_int_fast32_t', 'atomic_uint_fast32_t', 'atomic_int_fast64_t',
        'atomic_uint_fast64_t', 'atomic_intptr_t', 'atomic_uintptr_t', 'atomic_size_t',
        'atomic_ptrdiff_t', 'atomic_intmax_t', 'atomic_uintmax_t'}

    def __init__(self, **options):
        """Read the four ``*highlighting`` boolean options (all default True)
        that control which extra type-name sets are promoted to Keyword.Type."""
        self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
        self.c11highlighting = get_bool_opt(options, 'c11highlighting', True)
        self.platformhighlighting = get_bool_opt(options, 'platformhighlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Post-process the regex token stream: retag plain Name tokens that
        match a known stdlib/C99/C11/platform type name as Keyword.Type."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
                elif self.c11highlighting and value in self.c11_atomic_types:
                    token = Keyword.Type
                elif self.platformhighlighting and value in self.linux_types:
                    token = Keyword.Type
            yield index, token, value
+
+
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.

    Additional options accepted:

    `stdlibhighlighting`
        Highlight common types found in the C/C++ standard library (e.g. `size_t`).
        (default: ``True``).

    `c99highlighting`
        Highlight common types found in the C99 standard library (e.g. `int8_t`).
        Actually, this includes all fixed-width integer types.
        (default: ``True``).

    `c11highlighting`
        Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
        (default: ``True``).

    `platformhighlighting`
        Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
        (default: ``True``).
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc', '*.x[bp]m']
    mimetypes = ['text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap']
    # Slightly raised so C wins ties against other lexers claiming *.h.
    priority = 0.1

    tokens = {
        # Only C99/C11-specific additions live here; everything else is
        # inherited from CFamilyLexer via `inherit`.
        'keywords': [
            (words((
                '_Alignas', '_Alignof', '_Noreturn', '_Generic', '_Thread_local',
                '_Static_assert', '_Imaginary', 'noreturn', 'imaginary', 'complex'),
                suffix=r'\b'), Keyword),
            inherit
        ],
        'types': [
            (words(('_Bool', '_Complex', '_Atomic'), suffix=r'\b'), Keyword.Type),
            inherit
        ]
    }

    def analyse_text(text):
        # Preprocessor directives are a weak-but-useful hint that a file is C.
        # Falls through (returns None, i.e. no confidence) when neither matches.
        if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
            return 0.1
        if re.search(r'^\s*#ifn?def ', text, re.MULTILINE):
            return 0.1
+
+
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.

    Additional options accepted:

    `stdlibhighlighting`
        Highlight common types found in the C/C++ standard library (e.g. `size_t`).
        (default: ``True``).

    `c99highlighting`
        Highlight common types found in the C99 standard library (e.g. `int8_t`).
        Actually, this includes all fixed-width integer types.
        (default: ``True``).

    `c11highlighting`
        Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
        (default: ``True``).

    `platformhighlighting`
        Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
        (default: ``True``).
    """
    name = 'C++'
    url = 'https://isocpp.org/'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP', '*.tpp']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    priority = 0.1

    tokens = {
        'statements': [
            # C++11 raw strings: group 3 captures the delimiter (up to 16
            # chars) and the backreference \3 in group 6 requires the same
            # delimiter before the closing quote.
            (r'((?:[LuU]|u8)?R)(")([^\\()\s]{,16})(\()((?:.|\n)*?)(\)\3)(")',
             bygroups(String.Affix, String, String.Delimiter, String.Delimiter,
                      String, String.Delimiter, String)),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
                    'multiple_inheritance', 'interface', 'event'),
                   prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
        ],
        'enumname': [
            include('whitespace'),
            # 'enum class' and 'enum struct' C++11 support
            (words(('class', 'struct'), suffix=r'\b'), Keyword),
            (CFamilyLexer._ident, Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
            default('#pop')
        ],
        'keywords': [
            # `class`/`concept`/`typename` followed by a name push the
            # 'classname' state so the name gets Name.Class.
            (r'(class|concept|typename)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (words((
                'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
                'export', 'friend', 'mutable', 'new', 'operator',
                'private', 'protected', 'public', 'reinterpret_cast', 'class',
                'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
                'try', 'typeid', 'using', 'virtual', 'constexpr', 'nullptr', 'concept',
                'decltype', 'noexcept', 'override', 'final', 'constinit', 'consteval',
                'co_await', 'co_return', 'co_yield', 'requires', 'import', 'module',
                'typename'),
                suffix=r'\b'), Keyword),
            (r'namespace\b', Keyword, 'namespace'),
            (r'(enum)(\s+)', bygroups(Keyword, Whitespace), 'enumname'),
            inherit
        ],
        'types': [
            # C++11/C++20 character types (char8_t, char16_t, char32_t).
            (r'char(16_t|32_t|8_t)\b', Keyword.Type),
            inherit
        ],
        'namespace': [
            # A '{' or ';' ends the namespace header; re-enter 'root'.
            (r'[;{]', Punctuation, ('#pop', 'root')),
            (r'inline\b', Keyword.Reserved),  # inline namespaces (C++11)
            (CFamilyLexer._ident, Name.Namespace),
            include('statement')
        ]
    }

    def analyse_text(text):
        # C++-style includes (no .h suffix) and `using namespace` are the
        # discriminators against plain C.  Returns None (falsy) otherwise.
        if re.search('#include <[a-z_]+>', text):
            return 0.2
        if re.search('using namespace ', text):
            return 0.4
diff --git a/pygments/lexers/c_like.py b/pygments/lexers/c_like.py
new file mode 100644
index 0000000..96a3226
--- /dev/null
+++ b/pygments/lexers/c_like.py
@@ -0,0 +1,666 @@
+"""
+ pygments.lexers.c_like
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for other C-like languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
+ default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+from pygments.lexers.c_cpp import CLexer, CppLexer
+from pygments.lexers import _mql_builtins
+
+__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
+ 'CudaLexer', 'SwigLexer', 'MqlLexer', 'ArduinoLexer', 'CharmciLexer',
+ 'OmgIdlLexer']
+
+
class PikeLexer(CppLexer):
    """
    For `Pike <http://pike.lysator.liu.se/>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'Pike'
    aliases = ['pike']
    filenames = ['*.pike', '*.pmod']
    mimetypes = ['text/x-pike']

    tokens = {
        'statements': [
            # Pike keywords and predefined macros; matched before the
            # inherited C++ rules.
            (words((
                'catch', 'new', 'private', 'protected', 'public', 'gauge',
                'throw', 'throws', 'class', 'interface', 'implement', 'abstract',
                'extends', 'from', 'this', 'super', 'constant', 'final', 'static',
                'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue',
                'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in',
                'version', 'return', 'true', 'false', 'null',
                '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
                '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
                '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
                '__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
             Keyword),
            # Built-in types, plus any lowercase identifier ending in `_t`.
            (r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
             r'array|multiset|program|function|lambda|mixed|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            # Pike also allows '@' as an operator character.
            (r'[~!%^&*+=|?:<>/@-]', Operator),
            inherit,
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Whitespace, '#pop'),
            # NOTE(review): unlike the C-family 'classname' state this has no
            # default('#pop'); unmatched input stays in this state — confirm
            # this is intentional.
        ],
    }
+
+
class NesCLexer(CLexer):
    """
    For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
    directives.

    .. versionadded:: 2.0
    """
    name = 'nesC'
    aliases = ['nesc']
    filenames = ['*.nc']
    mimetypes = ['text/x-nescsrc']

    tokens = {
        'statements': [
            # nesC-specific keywords (component/interface wiring, tasks, ...);
            # everything else falls through to the inherited C rules.
            (words((
                'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
                'components', 'configuration', 'event', 'extends', 'generic',
                'implementation', 'includes', 'interface', 'module', 'new', 'norace',
                'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
             Keyword),
            # Network (external) types with defined byte order.
            (words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
                    'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
                    'nx_uint64_t'), suffix=r'\b'),
             Keyword.Type),
            inherit,
        ],
    }
+
+
class ClayLexer(RegexLexer):
    """
    For `Clay <http://claylabs.com/clay/>`_ source.

    .. versionadded:: 2.0
    """
    name = 'Clay'
    filenames = ['*.clay']
    aliases = ['clay']
    mimetypes = ['text/x-clay']
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\b(public|private|import|as|record|variant|instance'
             r'|define|overload|default|external|alias'
             r'|rvalue|ref|forward|inline|noinline|forceinline'
             r'|enum|var|and|or|not|if|else|goto|return|while'
             r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
             r'|finally|onerror|staticassert|eval|when|newtype'
             r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
             r')\b', Keyword),
            (r'[~!%^&*+=|:<>/-]', Operator),
            (r'[#(){}\[\],;.]', Punctuation),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\b(true|false)\b', Name.Builtin),
            # Identifiers may contain '?' in Clay.
            (r'(?i)[a-z_?][\w?]*', Name),
            (r'"""', String, 'tdqs'),  # triple-quoted string
            (r'"', String, 'dqs'),     # double-quoted string
        ],
        # Shared string-interior rules for both string states.
        'strings': [
            (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
            (r'[^\\"]+', String),
        ],
        # Newlines are only legal inside triple-quoted strings.
        'nl': [
            (r'\n', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings'),
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl'),
        ],
    }
+
+
class ECLexer(CLexer):
    """
    For eC source code with preprocessor directives.

    .. versionadded:: 1.5
    """
    name = 'eC'
    aliases = ['ec']
    filenames = ['*.ec', '*.eh']
    mimetypes = ['text/x-echdr', 'text/x-ecsrc']

    tokens = {
        'statements': [
            # eC language extensions over C (classes, properties, watchers,
            # database bindings, ...).
            (words((
                'virtual', 'class', 'private', 'public', 'property', 'import',
                'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
                'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
                '__on_register_module', 'namespace', 'using', 'typed_object',
                'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
                'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
                'class_default_property', 'property_category', 'class_data',
                'class_property', 'thisclass', 'dbtable', 'dbindex',
                'database_open', 'dbfield'), suffix=r'\b'), Keyword),
            (words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
                    'unichar', 'int64'), suffix=r'\b'),
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (r'(null|value|this)\b', Name.Builtin),
            inherit,
        ]
    }
+
+
class ValaLexer(RegexLexer):
    """
    For Vala source code with preprocessor directives.

    .. versionadded:: 1.1
    """
    name = 'Vala'
    aliases = ['vala', 'vapi']
    filenames = ['*.vala', '*.vapi']
    mimetypes = ['text/x-vala']

    tokens = {
        'whitespace': [
            # `#if 0` disables a region; handled by the dedicated 'if0' state.
            (r'^\s*#if\s+0', Comment.Preproc, 'if0'),
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            # '@"..."' are Vala string templates; 'L' prefix as in C.
            (r'[L@]?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(?s)""".*?"""', String),  # verbatim strings
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            # Simple code attributes like [Compact].
            (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation)),
            # TODO: "correctly" parse complex code attributes
            (r'(\[)(CCode|(?:Integer|Floating)Type)',
             bygroups(Punctuation, Name.Decorator)),
            (r'[()\[\],.]', Punctuation),
            (words((
                'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
                'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
                'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
                'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
                'typeof', 'while', 'yield'), suffix=r'\b'),
             Keyword),
            (words((
                'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
                'inline', 'internal', 'override', 'owned', 'private', 'protected',
                'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
                'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
             Keyword.Declaration),
            (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Whitespace),
             'namespace'),
            (r'(class|errordomain|interface|struct)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            # void is an actual keyword, others are in glib-2.0.vapi
            (words((
                'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
                'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
                'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'ulong', 'unichar', 'ushort'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false|null)\b', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # Anything that is not whitespace begins a statement.
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        # Preprocessor-disabled region; #push/#pop track nested #if blocks.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            # Dotted namespace paths are allowed (e.g. Gtk.Widget).
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ],
    }
+
+
class CudaLexer(CLexer):
    """
    For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
    source.

    .. versionadded:: 1.6
    """
    name = 'CUDA'
    filenames = ['*.cu', '*.cuh']
    aliases = ['cuda', 'cu']
    mimetypes = ['text/x-cuda']

    # Identifier sets used to retag Name tokens in get_tokens_unprocessed().
    # Note: '__device__' appears in both qualifier sets; variable_qualifiers
    # is tested first below, so it wins (Keyword.Type).
    function_qualifiers = {'__device__', '__global__', '__host__',
                           '__noinline__', '__forceinline__'}
    variable_qualifiers = {'__device__', '__constant__', '__shared__',
                           '__restrict__'}
    vector_types = {'char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
                    'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
                    'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
                    'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
                    'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
                    'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
                    'ulonglong2', 'float1', 'float2', 'float3', 'float4',
                    'double1', 'double2', 'dim3'}
    # Built-in kernel variables.
    variables = {'gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'}
    # Synchronization/fence intrinsics.
    functions = {'__threadfence_block', '__threadfence', '__threadfence_system',
                 '__syncthreads', '__syncthreads_count', '__syncthreads_and',
                 '__syncthreads_or'}
    # NOTE(review): '<<<'/'>>>' never appear as a single Name token from the
    # inherited C rules, so this set looks unreachable — confirm.
    execution_confs = {'<<<', '>>>'}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # Retag plain identifiers according to the CUDA-specific sets above;
        # all other tokens pass through unchanged.
        for index, token, value in CLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name:
                if value in self.variable_qualifiers:
                    token = Keyword.Type
                elif value in self.vector_types:
                    token = Keyword.Type
                elif value in self.variables:
                    token = Name.Builtin
                elif value in self.execution_confs:
                    token = Keyword.Pseudo
                elif value in self.function_qualifiers:
                    token = Keyword.Reserved
                elif value in self.functions:
                    token = Name.Function
            yield index, token, value
+
+
class SwigLexer(CppLexer):
    """
    For `SWIG <http://www.swig.org/>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'SWIG'
    aliases = ['swig']
    filenames = ['*.swg', '*.i']
    mimetypes = ['text/swig']
    priority = 0.04  # Lower than C/C++ and Objective C/C++

    tokens = {
        'root': [
            # Match it here so it won't be matched as a function in the rest of root
            (r'\$\**\&?\w+', Name),
            inherit
        ],
        'statements': [
            # SWIG directives
            (r'(%[a-z_][a-z0-9_]*)', Name.Function),
            # Special variables
            (r'\$\**\&?\w+', Name),
            # Stringification / additional preprocessor directives
            (r'##*[a-zA-Z_]\w*', Comment.Preproc),
            inherit,
        ],
    }

    # This is a far from complete set of SWIG directives
    swig_directives = {
        # Most common directives
        '%apply', '%define', '%director', '%enddef', '%exception', '%extend',
        '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
        '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
        '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
        # Less common directives
        '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
        '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
        '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
        '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
        '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
        '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
        '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
        '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
        '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
        '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
        '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
        '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
        '%warnfilter'}

    def analyse_text(text):
        rv = 0
        # Search for SWIG directives, which are conventionally at the beginning of
        # a line. The probability of them being within a line is low, so let another
        # lexer win in this case.
        matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
        for m in matches:
            if m in SwigLexer.swig_directives:
                rv = 0.98
                break
        else:
            rv = 0.91  # Fraction higher than MatlabLexer
        return rv
+
+
class MqlLexer(CppLexer):
    """
    For `MQL4 <http://docs.mql4.com/>`_ and
    `MQL5 <http://www.mql5.com/en/docs>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'MQL'
    aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
    filenames = ['*.mq4', '*.mq5', '*.mqh']
    mimetypes = ['text/x-mql']

    tokens = {
        'statements': [
            # All word lists come from the generated _mql_builtins module;
            # the inherited C++ rules handle everything else.
            (words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
            (words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
            (words(_mql_builtins.types, suffix=r'\b'), Name.Function),
            (words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
            # Color names may optionally carry a 'clr' prefix (MQL5 style).
            (words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
             Name.Constant),
            inherit,
        ],
    }
+
+
class ArduinoLexer(CppLexer):
    """
    For `Arduino(tm) <https://arduino.cc/>`_ source.

    This is an extension of the CppLexer, as the Arduino® Language is a superset
    of C++

    .. versionadded:: 2.1
    """

    name = 'Arduino'
    aliases = ['arduino']
    filenames = ['*.ino']
    mimetypes = ['text/x-arduino']

    # The sets below drive get_tokens_unprocessed(); a token's value is
    # looked up in each set in the order the branches appear there, so a
    # name in several sets gets the tag of the FIRST matching branch.

    # Language sketch main structure functions
    structure = {'setup', 'loop'}

    # Language operators
    operators = {'not', 'or', 'and', 'xor'}

    # Language 'variables'
    variables = {
        'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
        'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
        'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
        'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
        'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
        'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String',
        'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string',
        'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
        'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
        'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
        'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static',
        'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
        'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit',
        'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
        'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
        'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
        'atomic_llong', 'atomic_ullong', 'PROGMEM'}

    # Language shipped functions and class ( )
    functions = {
        'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
        'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
        'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
        'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
        'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
        'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
        'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
        'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
        'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
        'runShellCommandAsynchronously', 'analogWriteResolution',
        'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
        'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton',
        'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight',
        'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds',
        'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
        'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
        'getCurrentCarrier', 'readAccelerometer', 'messageAvailable',
        'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
        'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider',
        'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
        'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil',
        'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
        'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
        'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
        'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll',
        'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
        'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
        'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode',
        'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory',
        'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent',
        'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
        'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
        'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed',
        'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite',
        'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
        'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed',
        'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll',
        'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
        'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
        'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue',
        'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex',
        'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
        'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
        'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
        'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
        'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer',
        'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached', 'maintain',
        'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
        'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
        'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand',
        'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill',
        'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
        'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS',
        'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready',
        'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
        'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move',
        'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop',
        'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
        'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put',
        'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
        'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
        'isHexadecimalDigit'}

    # do not highlight
    suppress_highlight = {
        'namespace', 'template', 'mutable', 'using', 'asm', 'typeid',
        'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
        'static_assert', 'thread_local', 'restrict'}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # Retag Arduino-specific names; unlike the CUDA lexer, this checks
        # EVERY token value (not just Name), so e.g. C++ keywords listed in
        # suppress_highlight are demoted back to plain Name.
        for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
            if value in self.structure:
                yield index, Name.Builtin, value
            elif value in self.operators:
                yield index, Operator, value
            elif value in self.variables:
                yield index, Keyword.Reserved, value
            elif value in self.suppress_highlight:
                yield index, Name, value
            elif value in self.functions:
                yield index, Name.Function, value
            else:
                yield index, token, value
+
+
class CharmciLexer(CppLexer):
    """
    For `Charm++ <https://charm.cs.illinois.edu>`_ interface files (.ci).

    .. versionadded:: 2.4
    """

    name = 'Charmci'
    aliases = ['charmci']
    filenames = ['*.ci']

    mimetypes = []

    tokens = {
        'keywords': [
            # Fix: tag the run of whitespace after `module` as Whitespace
            # (was Text) for consistency with every other bygroups rule in
            # these modules, e.g. CppLexer's `(enum)(\s+)` rule.
            (r'(module)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            # Chare/collection declaration keywords.
            (words(('mainmodule', 'mainchare', 'chare', 'array', 'group',
                    'nodegroup', 'message', 'conditional')), Keyword),
            # Entry-method attributes and other .ci-specific modifiers.
            (words(('entry', 'aggregate', 'threaded', 'sync', 'exclusive',
                    'nokeep', 'notrace', 'immediate', 'expedited', 'inline',
                    'local', 'python', 'accel', 'readwrite', 'writeonly',
                    'accelblock', 'memcritical', 'packed', 'varsize',
                    'initproc', 'initnode', 'initcall', 'stacksize',
                    'createhere', 'createhome', 'reductiontarget', 'iget',
                    'nocopy', 'mutable', 'migratable', 'readonly')), Keyword),
            inherit,
        ],
    }
+
+
class OmgIdlLexer(CLexer):
    """
    Lexer for Object Management Group Interface Definition Language.

    .. versionadded:: 2.9
    """

    name = 'OMG Interface Definition Language'
    url = 'https://www.omg.org/spec/IDL/About-IDL/'
    aliases = ['omg-idl']
    filenames = ['*.idl', '*.pidl']
    mimetypes = []

    # A possibly-qualified identifier like `a::b::c` or `::root::x`.
    # NOTE(review): the nested quantifier `((::)?\w+)+` can backtrack
    # catastrophically on long non-matching identifier-like runs — confirm.
    scoped_name = r'((::)?\w+)+'

    tokens = {
        'values': [
            (words(('true', 'false'), prefix=r'(?i)', suffix=r'\b'), Number),
            (r'([Ll]?)(")', bygroups(String.Affix, String.Double), 'string'),
            (r'([Ll]?)(\')(\\[^\']+)(\')',
             bygroups(String.Affix, String.Char, String.Escape, String.Char)),
            (r'([Ll]?)(\')(\\\')(\')',
             bygroups(String.Affix, String.Char, String.Escape, String.Char)),
            (r'([Ll]?)(\'.\')', bygroups(String.Affix, String.Char)),
            (r'[+-]?\d+(\.\d*)?[Ee][+-]?\d+', Number.Float),
            # NOTE(review): due to '|' precedence the optional sign binds only
            # to the first alternative and the exponent only to the second —
            # confirm this grouping is intended.
            (r'[+-]?(\d+\.\d*)|(\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
            (r'(?i)[+-]?0x[0-9a-f]+', Number.Hex),
            (r'[+-]?[1-9]\d*', Number.Integer),
            (r'[+-]?0[0-7]*', Number.Oct),
            (r'[\+\-\*\/%^&\|~]', Operator),
            (words(('<<', '>>')), Operator),
            (scoped_name, Name),
            (r'[{};:,<>\[\]]', Punctuation),
        ],
        # Inside the parenthesized argument list of an annotation.
        'annotation_params': [
            include('whitespace'),
            (r'\(', Punctuation, '#push'),
            include('values'),
            (r'=', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],
        # After `@name`: arguments are optional.
        'annotation_params_maybe': [
            (r'\(', Punctuation, 'annotation_params'),
            include('whitespace'),
            default('#pop'),
        ],
        'annotation_appl': [
            (r'@' + scoped_name, Name.Decorator, 'annotation_params_maybe'),
        ],
        # Body of an `enum`/`bitmask` block; members become Name.Constant.
        'enum': [
            include('whitespace'),
            (r'[{,]', Punctuation),
            (r'\w+', Name.Constant),
            include('annotation_appl'),
            (r'\}', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            (words((
                'typedef', 'const',
                'in', 'out', 'inout', 'local',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword.Declaration),
            (words((
                'void', 'any', 'native', 'bitfield',
                'unsigned', 'boolean', 'char', 'wchar', 'octet', 'short', 'long',
                'int8', 'uint8', 'int16', 'int32', 'int64', 'uint16', 'uint32', 'uint64',
                'float', 'double', 'fixed',
                'sequence', 'string', 'wstring', 'map',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword.Type),
            # Type-defining keywords followed by the new type's name.
            (words((
                '@annotation', 'struct', 'union', 'bitset', 'interface',
                'exception', 'valuetype', 'eventtype', 'component',
            ), prefix=r'(?i)', suffix=r'(\s+)(\w+)'), bygroups(Keyword, Whitespace, Name.Class)),
            (words((
                'abstract', 'alias', 'attribute', 'case', 'connector',
                'consumes', 'context', 'custom', 'default', 'emits', 'factory',
                'finder', 'getraises', 'home', 'import', 'manages', 'mirrorport',
                'multiple', 'Object', 'oneway', 'primarykey', 'private', 'port',
                'porttype', 'provides', 'public', 'publishes', 'raises',
                'readonly', 'setraises', 'supports', 'switch', 'truncatable',
                'typeid', 'typename', 'typeprefix', 'uses', 'ValueBase',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword),
            (r'(?i)(enum|bitmask)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Class), 'enum'),
            (r'(?i)(module)(\s+)(\w+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(\w+)(\s*)(=)', bygroups(Name.Constant, Whitespace, Operator)),
            (r'[\(\)]', Punctuation),
            include('values'),
            include('annotation_appl'),
        ],
    }
diff --git a/pygments/lexers/capnproto.py b/pygments/lexers/capnproto.py
new file mode 100644
index 0000000..1810796
--- /dev/null
+++ b/pygments/lexers/capnproto.py
@@ -0,0 +1,75 @@
+"""
+ pygments.lexers.capnproto
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Cap'n Proto schema language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, default
+from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
+
+__all__ = ['CapnProtoLexer']
+
+
class CapnProtoLexer(RegexLexer):
    """
    For Cap'n Proto source.

    .. versionadded:: 2.2
    """
    name = 'Cap\'n Proto'
    url = 'https://capnproto.org'
    filenames = ['*.capnp']
    aliases = ['capnp']

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # Field/declaration ordinals like @0, @1.
            (r'@[0-9a-zA-Z]*', Name.Decorator),
            # '=' starts a default/constant value, ':' a type, '$' an
            # annotation; each hands off to its own bracket-aware state.
            (r'=', Literal, 'expression'),
            (r':', Name.Class, 'type'),
            (r'\$', Name.Attribute, 'annotation'),
            (r'(struct|enum|interface|union|import|using|const|annotation|'
             r'extends|in|of|on|as|with|from|fixed)\b',
             Keyword),
            (r'[\w.]+', Name),
            (r'[^#@=:$\w\s]+', Text),
            (r'\s+', Whitespace),
        ],
        # The three state pairs below share one pattern: consume plain text
        # until a structural character, recurse through '['/'(' pairs via the
        # companion 'paren*' state, and pop back to root on anything else.
        'type': [
            (r'[^][=;,(){}$]+', Name.Class),
            (r'[\[(]', Name.Class, 'parentype'),
            default('#pop'),
        ],
        'parentype': [
            (r'[^][;()]+', Name.Class),
            (r'[\[(]', Name.Class, '#push'),
            (r'[])]', Name.Class, '#pop'),
            default('#pop'),
        ],
        'expression': [
            (r'[^][;,(){}$]+', Literal),
            (r'[\[(]', Literal, 'parenexp'),
            default('#pop'),
        ],
        'parenexp': [
            (r'[^][;()]+', Literal),
            (r'[\[(]', Literal, '#push'),
            (r'[])]', Literal, '#pop'),
            default('#pop'),
        ],
        'annotation': [
            (r'[^][;,(){}=:]+', Name.Attribute),
            (r'[\[(]', Name.Attribute, 'annexp'),
            default('#pop'),
        ],
        'annexp': [
            (r'[^][;()]+', Name.Attribute),
            (r'[\[(]', Name.Attribute, '#push'),
            (r'[])]', Name.Attribute, '#pop'),
            default('#pop'),
        ],
    }
diff --git a/pygments/lexers/cddl.py b/pygments/lexers/cddl.py
new file mode 100644
index 0000000..e55b655
--- /dev/null
+++ b/pygments/lexers/cddl.py
@@ -0,0 +1,173 @@
+"""
+ pygments.lexers.cddl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Concise data definition language (CDDL), a notational
+ convention to express CBOR and JSON data structures.
+
+ More information:
+ https://datatracker.ietf.org/doc/rfc8610/
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+
+__all__ = ['CddlLexer']
+
+
+class CddlLexer(RegexLexer):
+ """
+ Lexer for CDDL definitions.
+
+ .. versionadded:: 2.8
+ """
+ name = "CDDL"
+ url = 'https://datatracker.ietf.org/doc/rfc8610/'
+ aliases = ["cddl"]
+ filenames = ["*.cddl"]
+ mimetypes = ["text/x-cddl"]
+
+ _prelude_types = [
+ "any",
+ "b64legacy",
+ "b64url",
+ "bigfloat",
+ "bigint",
+ "bignint",
+ "biguint",
+ "bool",
+ "bstr",
+ "bytes",
+ "cbor-any",
+ "decfrac",
+ "eb16",
+ "eb64legacy",
+ "eb64url",
+ "encoded-cbor",
+ "false",
+ "float",
+ "float16",
+ "float16-32",
+ "float32",
+ "float32-64",
+ "float64",
+ "int",
+ "integer",
+ "mime-message",
+ "nil",
+ "nint",
+ "null",
+ "number",
+ "regexp",
+ "tdate",
+ "text",
+ "time",
+ "true",
+ "tstr",
+ "uint",
+ "undefined",
+ "unsigned",
+ "uri",
+ ]
+
+ _controls = [
+ ".and",
+ ".bits",
+ ".cbor",
+ ".cborseq",
+ ".default",
+ ".eq",
+ ".ge",
+ ".gt",
+ ".le",
+ ".lt",
+ ".ne",
+ ".regexp",
+ ".size",
+ ".within",
+ ]
+
+ _re_id = (
+ r"[$@A-Z_a-z]"
+ r"(?:[\-\.]+(?=[$@0-9A-Z_a-z])|[$@0-9A-Z_a-z])*"
+
+ )
+
+ # While the spec reads more like "an int must not start with 0" we use a
+ # lookahead here that says "after a 0 there must be no digit". This makes the
+ # '0' the invalid character in '01', which looks nicer when highlighted.
+ _re_uint = r"(?:0b[01]+|0x[0-9a-fA-F]+|[1-9]\d*|0(?!\d))"
+ _re_int = r"-?" + _re_uint
+
+ tokens = {
+ "commentsandwhitespace": [(r"\s+", Whitespace), (r";.+$", Comment.Single)],
+ "root": [
+ include("commentsandwhitespace"),
+ # tag types
+ (r"#(\d\.{uint})?".format(uint=_re_uint), Keyword.Type), # type or any
+ # occurrence
+ (
+ r"({uint})?(\*)({uint})?".format(uint=_re_uint),
+ bygroups(Number, Operator, Number),
+ ),
+ (r"\?|\+", Operator), # occurrence
+ (r"\^", Operator), # cuts
+ (r"(\.\.\.|\.\.)", Operator), # rangeop
+ (words(_controls, suffix=r"\b"), Operator.Word), # ctlops
+ # into choice op
+ (r"&(?=\s*({groupname}|\())".format(groupname=_re_id), Operator),
+ (r"~(?=\s*{})".format(_re_id), Operator), # unwrap op
+ (r"//|/(?!/)", Operator), # double und single slash
+ (r"=>|/==|/=|=", Operator),
+ (r"[\[\]{}\(\),<>:]", Punctuation),
+ # Bytestrings
+ (r"(b64)(')", bygroups(String.Affix, String.Single), "bstrb64url"),
+ (r"(h)(')", bygroups(String.Affix, String.Single), "bstrh"),
+ (r"'", String.Single, "bstr"),
+ # Barewords as member keys (must be matched before values, types, typenames,
+ # groupnames).
+ # Token type is String as barewords are always interpreted as such.
+ (r"({bareword})(\s*)(:)".format(bareword=_re_id),
+ bygroups(String, Whitespace, Punctuation)),
+ # predefined types
+ (words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"),
+ Name.Builtin),
+ # user-defined groupnames, typenames
+ (_re_id, Name.Class),
+ # values
+ (r"0b[01]+", Number.Bin),
+ (r"0o[0-7]+", Number.Oct),
+ (r"0x[0-9a-fA-F]+(\.[0-9a-fA-F]+)?p[+-]?\d+", Number.Hex), # hexfloat
+ (r"0x[0-9a-fA-F]+", Number.Hex), # hex
+ # Float
+ (r"{int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?".format(int=_re_int),
+ Number.Float),
+ # Int
+ (_re_int, Number.Integer),
+ (r'"(\\\\|\\"|[^"])*"', String.Double),
+ ],
+ "bstrb64url": [
+ (r"'", String.Single, "#pop"),
+ include("commentsandwhitespace"),
+ (r"\\.", String.Escape),
+ (r"[0-9a-zA-Z\-_=]+", String.Single),
+ (r".", Error),
+ # (r";.+$", Token.Other),
+ ],
+ "bstrh": [
+ (r"'", String.Single, "#pop"),
+ include("commentsandwhitespace"),
+ (r"\\.", String.Escape),
+ (r"[0-9a-fA-F]+", String.Single),
+ (r".", Error),
+ ],
+ "bstr": [
+ (r"'", String.Single, "#pop"),
+ (r"\\.", String.Escape),
+ (r"[^'\\]+", String.Single),
+ ],
+ }
diff --git a/pygments/lexers/chapel.py b/pygments/lexers/chapel.py
new file mode 100644
index 0000000..908d391
--- /dev/null
+++ b/pygments/lexers/chapel.py
@@ -0,0 +1,136 @@
+"""
+ pygments.lexers.chapel
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Chapel language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['ChapelLexer']
+
+
+class ChapelLexer(RegexLexer):
+ """
+ For Chapel source.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Chapel'
+ url = 'https://chapel-lang.org/'
+ filenames = ['*.chpl']
+ aliases = ['chapel', 'chpl']
+ # mimetypes = ['text/x-chapel']
+
+ known_types = ('bool', 'bytes', 'complex', 'imag', 'int', 'locale',
+ 'nothing', 'opaque', 'range', 'real', 'string', 'uint',
+ 'void')
+
+ type_modifiers_par = ('atomic', 'single', 'sync')
+ type_modifiers_mem = ('borrowed', 'owned', 'shared', 'unmanaged')
+ type_modifiers = (*type_modifiers_par, *type_modifiers_mem)
+
+ declarations = ('config', 'const', 'in', 'inout', 'out', 'param', 'ref',
+ 'type', 'var')
+
+ constants = ('false', 'nil', 'none', 'true')
+
+ other_keywords = ('align', 'as',
+ 'begin', 'break', 'by',
+ 'catch', 'cobegin', 'coforall', 'continue',
+ 'defer', 'delete', 'dmapped', 'do', 'domain',
+ 'else', 'enum', 'except', 'export', 'extern',
+ 'for', 'forall', 'foreach', 'forwarding',
+ 'if', 'implements', 'import', 'index', 'init', 'inline',
+ 'label', 'lambda', 'let', 'lifetime', 'local',
+ 'new', 'noinit',
+ 'on', 'only', 'otherwise', 'override',
+ 'pragma', 'primitive', 'private', 'prototype', 'public',
+ 'reduce', 'require', 'return',
+ 'scan', 'select', 'serial', 'sparse', 'subdomain',
+ 'then', 'this', 'throw', 'throws', 'try',
+ 'use',
+ 'when', 'where', 'while', 'with',
+ 'yield',
+ 'zip')
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'\\\n', Text),
+
+ (r'//(.*?)\n', Comment.Single),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+
+ (words(declarations, suffix=r'\b'), Keyword.Declaration),
+ (words(constants, suffix=r'\b'), Keyword.Constant),
+ (words(known_types, suffix=r'\b'), Keyword.Type),
+ (words((*type_modifiers, *other_keywords), suffix=r'\b'), Keyword),
+
+ (r'(iter)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
+ (r'(proc)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
+ (r'(operator)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
+ (r'(class|interface|module|record|union)(\s+)', bygroups(Keyword, Whitespace),
+ 'classname'),
+
+ # imaginary integers
+ (r'\d+i', Number),
+ (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
+ (r'\.\d+([Ee][-+]\d+)?i', Number),
+ (r'\d+[Ee][-+]\d+i', Number),
+
+ # reals cannot end with a period due to lexical ambiguity with
+ # .. operator. See reference for rationale.
+ (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
+ (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
+
+ # integer literals
+ # -- binary
+ (r'0[bB][01]+', Number.Bin),
+ # -- hex
+ (r'0[xX][0-9a-fA-F]+', Number.Hex),
+ # -- octal
+ (r'0[oO][0-7]+', Number.Oct),
+ # -- decimal
+ (r'[0-9]+', Number.Integer),
+
+ # strings
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r"'(\\\\|\\'|[^'])*'", String),
+
+ # tokens
+ (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
+ r'<=>|<~>|\.\.|by|#|\.\.\.|'
+ r'&&|\|\||!|&|\||\^|~|<<|>>|'
+ r'==|!=|<=|>=|<|>|'
+ r'[+\-*/%]|\*\*)', Operator),
+ (r'[:;,.?()\[\]{}]', Punctuation),
+
+ # identifiers
+ (r'[a-zA-Z_][\w$]*', Name.Other),
+ ],
+ 'classname': [
+ (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
+ ],
+ 'procname': [
+ (r'([a-zA-Z_][.\w$]*|' # regular function name, including secondary
+ r'\~[a-zA-Z_][.\w$]*|' # support for legacy destructors
+ r'[+*/!~%<>=&^|\-:]{1,2})', # operators
+ Name.Function, '#pop'),
+
+ # allow `proc (atomic T).foo`
+ (r'\(', Punctuation, "receivertype"),
+ (r'\)+\.', Punctuation),
+ ],
+ 'receivertype': [
+ (words(type_modifiers, suffix=r'\b'), Keyword),
+ (words(known_types, suffix=r'\b'), Keyword.Type),
+ (r'[^()]*', Name.Other, '#pop'),
+ ],
+ }
diff --git a/pygments/lexers/clean.py b/pygments/lexers/clean.py
new file mode 100644
index 0000000..ff2bf6f
--- /dev/null
+++ b/pygments/lexers/clean.py
@@ -0,0 +1,179 @@
+"""
+ pygments.lexers.clean
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Clean language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import ExtendedRegexLexer, words, default, include, bygroups
+from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
+ Operator, Punctuation, String, Whitespace
+
+__all__ = ['CleanLexer']
+
+
+class CleanLexer(ExtendedRegexLexer):
+ """
+ Lexer for the general purpose, state-of-the-art, pure and lazy functional
+ programming language Clean.
+
+    .. versionadded:: 2.2
+ """
+ name = 'Clean'
+ url = 'http://clean.cs.ru.nl/Clean'
+ aliases = ['clean']
+ filenames = ['*.icl', '*.dcl']
+
+ keywords = (
+ 'case', 'ccall', 'class', 'code', 'code inline', 'derive', 'export',
+ 'foreign', 'generic', 'if', 'in', 'infix', 'infixl', 'infixr',
+ 'instance', 'let', 'of', 'otherwise', 'special', 'stdcall', 'where',
+ 'with')
+
+ modulewords = ('implementation', 'definition', 'system')
+
+ lowerId = r'[a-z`][\w`]*'
+ upperId = r'[A-Z`][\w`]*'
+ funnyId = r'[~@#$%\^?!+\-*<>\\/|&=:]+'
+ scoreUpperId = r'_' + upperId
+ scoreLowerId = r'_' + lowerId
+ moduleId = r'[a-zA-Z_][a-zA-Z0-9_.`]+'
+ classId = '|'.join([lowerId, upperId, funnyId])
+
+ tokens = {
+ 'root': [
+ include('comments'),
+ include('keywords'),
+ include('module'),
+ include('import'),
+ include('whitespace'),
+ include('literals'),
+ include('operators'),
+ include('delimiters'),
+ include('names'),
+ ],
+ 'whitespace': [
+ (r'\s+', Whitespace),
+ ],
+ 'comments': [
+ (r'//.*\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comments.in'),
+ (r'/\*\*', Comment.Special, 'comments.in'),
+ ],
+ 'comments.in': [
+ (r'\*\/', Comment.Multiline, '#pop'),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'[^*/]+', Comment.Multiline),
+ (r'\*(?!/)', Comment.Multiline),
+ (r'/', Comment.Multiline),
+ ],
+ 'keywords': [
+ (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
+ ],
+ 'module': [
+ (words(modulewords, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
+ (r'\bmodule\b', Keyword.Namespace, 'module.name'),
+ ],
+ 'module.name': [
+ include('whitespace'),
+ (moduleId, Name.Class, '#pop'),
+ ],
+ 'import': [
+ (r'\b(import)\b(\s*)', bygroups(Keyword, Whitespace), 'import.module'),
+ (r'\b(from)\b(\s*)\b(' + moduleId + r')\b(\s*)\b(import)\b',
+ bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword),
+ 'import.what'),
+ ],
+ 'import.module': [
+ (r'\b(qualified)\b(\s*)', bygroups(Keyword, Whitespace)),
+ (r'(\s*)\b(as)\b', bygroups(Whitespace, Keyword), ('#pop', 'import.module.as')),
+ (moduleId, Name.Class),
+ (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
+ (r'\s+', Whitespace),
+ default('#pop'),
+ ],
+ 'import.module.as': [
+ include('whitespace'),
+ (lowerId, Name.Class, '#pop'),
+ (upperId, Name.Class, '#pop'),
+ ],
+ 'import.what': [
+ (r'\b(class)\b(\s+)(' + classId + r')',
+ bygroups(Keyword, Whitespace, Name.Class), 'import.what.class'),
+ (r'\b(instance)(\s+)(' + classId + r')(\s+)',
+ bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'import.what.instance'),
+ (r'(::)(\s*)\b(' + upperId + r')\b',
+ bygroups(Punctuation, Whitespace, Name.Class), 'import.what.type'),
+ (r'\b(generic)\b(\s+)\b(' + lowerId + '|' + upperId + r')\b',
+ bygroups(Keyword, Whitespace, Name)),
+ include('names'),
+ (r'(,)(\s+)', bygroups(Punctuation, Whitespace)),
+ (r'$', Whitespace, '#pop'),
+ include('whitespace'),
+ ],
+ 'import.what.class': [
+ (r',', Punctuation, '#pop'),
+ (r'\(', Punctuation, 'import.what.class.members'),
+ (r'$', Whitespace, '#pop:2'),
+ include('whitespace'),
+ ],
+ 'import.what.class.members': [
+ (r',', Punctuation),
+ (r'\.\.', Punctuation),
+ (r'\)', Punctuation, '#pop'),
+ include('names'),
+ ],
+ 'import.what.instance': [
+ (r'[,)]', Punctuation, '#pop'),
+ (r'\(', Punctuation, 'import.what.instance'),
+ (r'$', Whitespace, '#pop:2'),
+ include('whitespace'),
+ include('names'),
+ ],
+ 'import.what.type': [
+ (r',', Punctuation, '#pop'),
+ (r'[({]', Punctuation, 'import.what.type.consesandfields'),
+ (r'$', Whitespace, '#pop:2'),
+ include('whitespace'),
+ ],
+ 'import.what.type.consesandfields': [
+ (r',', Punctuation),
+ (r'\.\.', Punctuation),
+ (r'[)}]', Punctuation, '#pop'),
+ include('names'),
+ ],
+ 'literals': [
+ (r'\'([^\'\\]|\\(x[\da-fA-F]+|\d+|.))\'', Literal.Char),
+ (r'[+~-]?0[0-7]+\b', Number.Oct),
+ (r'[+~-]?\d+\.\d+(E[+-]?\d+)?', Number.Float),
+ (r'[+~-]?\d+\b', Number.Integer),
+ (r'[+~-]?0x[\da-fA-F]+\b', Number.Hex),
+ (r'True|False', Literal),
+ (r'"', String.Double, 'literals.stringd'),
+ ],
+ 'literals.stringd': [
+ (r'[^\\"\n]+', String.Double),
+ (r'"', String.Double, '#pop'),
+ (r'\\.', String.Double),
+ (r'[$\n]', Error, '#pop'),
+ ],
+ 'operators': [
+ (r'[-~@#$%\^?!+*<>\\/|&=:.]+', Operator),
+ (r'\b_+\b', Operator),
+ ],
+ 'delimiters': [
+ (r'[,;(){}\[\]]', Punctuation),
+ (r'(\')([\w`.]+)(\')',
+ bygroups(Punctuation, Name.Class, Punctuation)),
+ ],
+ 'names': [
+ (lowerId, Name),
+ (scoreLowerId, Name),
+ (funnyId, Name.Function),
+ (upperId, Name.Class),
+ (scoreUpperId, Name.Class),
+ ]
+ }
diff --git a/pygments/lexers/comal.py b/pygments/lexers/comal.py
new file mode 100644
index 0000000..258d32e
--- /dev/null
+++ b/pygments/lexers/comal.py
@@ -0,0 +1,80 @@
+"""
+ pygments.lexers.comal
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for COMAL-80.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Whitespace, Operator, Keyword, String, \
+ Number, Name, Punctuation
+
+__all__ = ["Comal80Lexer"]
+
+
+class Comal80Lexer(RegexLexer):
+ """
+ For COMAL-80 source code.
+ """
+
+ name = 'COMAL-80'
+ url = 'https://en.wikipedia.org/wiki/COMAL'
+ aliases = ['comal', 'comal80']
+ filenames = ['*.cml', '*.comal']
+ flags = re.IGNORECASE
+ #
+ # COMAL allows for some strange characters in names which we list here so
+ # keywords and word operators will not be recognized at the start of an
+ # identifier.
+ #
+ _suffix = r"\b(?!['\[\]←£\\])"
+ _identifier = r"[a-z]['\[\]←£\\\w]*"
+
+ tokens = {
+ 'root': [
+ (r'//.*\n', Comment.Single),
+ (r'\s+', Whitespace),
+ (r':[=+-]|\<\>|[-+*/^↑<>=]', Operator),
+ (r'(and +then|or +else)' + _suffix, Operator.Word),
+ (words([
+ 'and', 'bitand', 'bitor', 'bitxor', 'div', 'in', 'mod', 'not',
+ 'or'], suffix=_suffix,), Operator.Word),
+ (words([
+ 'append', 'at', 'case', 'chain', 'close', 'copy', 'create', 'cursor',
+ 'data', 'delete', 'dir', 'do', 'elif', 'else', 'end', 'endcase', 'endif',
+ 'endfor', 'endloop', 'endtrap', 'endwhile', 'exec', 'exit', 'file',
+ 'for', 'goto', 'handler', 'if', 'input', 'let', 'loop', 'mount', 'null',
+ 'of', 'open', 'otherwise', 'output', 'page', 'pass', 'poke', 'print',
+ 'random', 'read', 'repeat', 'report', 'return', 'rename', 'restore',
+ 'select', 'step', 'stop', 'sys', 'then', 'to', 'trap', 'unit', 'unit$',
+ 'until', 'using', 'when', 'while', 'write', 'zone'], suffix=_suffix),
+ Keyword.Reserved),
+ (words([
+ 'closed', 'dim', 'endfunc', 'endproc', 'external', 'func', 'import',
+ 'proc', 'ref', 'use'], suffix=_suffix), Keyword.Declaration),
+ (words([
+ 'abs', 'atn', 'chr$', 'cos', 'eod', 'eof', 'err', 'errfile', 'errtext',
+ 'esc', 'exp', 'int', 'key$', 'len', 'log', 'ord', 'peek', 'randomize',
+ 'rnd', 'sgn', 'sin', 'spc$', 'sqr', 'status$', 'str$', 'tab', 'tan',
+ 'time', 'val'], suffix=_suffix), Name.Builtin),
+ (words(['false', 'pi', 'true'], suffix=_suffix), Keyword.Constant),
+ (r'"', String, 'string'),
+ (_identifier + r":(?=[ \n/])", Name.Label),
+ (_identifier + r"[$#]?", Name),
+ (r'%[01]+', Number.Bin),
+ (r'\$[0-9a-f]+', Number.Hex),
+ (r'\d*\.\d*(e[-+]?\d+)?', Number.Float),
+ (r'\d+', Number.Integer),
+ (r'[(),:;]', Punctuation),
+ ],
+ 'string': [
+ (r'[^"]+', String),
+ (r'"[0-9]*"', String.Escape),
+ (r'"', String, '#pop'),
+ ],
+ }
diff --git a/pygments/lexers/compiled.py b/pygments/lexers/compiled.py
new file mode 100644
index 0000000..eaf6fd3
--- /dev/null
+++ b/pygments/lexers/compiled.py
@@ -0,0 +1,34 @@
+"""
+ pygments.lexers.compiled
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Just export lexer classes previously contained in this module.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.jvm import JavaLexer, ScalaLexer
+from pygments.lexers.c_cpp import CLexer, CppLexer
+from pygments.lexers.d import DLexer
+from pygments.lexers.objective import ObjectiveCLexer, \
+ ObjectiveCppLexer, LogosLexer
+from pygments.lexers.go import GoLexer
+from pygments.lexers.rust import RustLexer
+from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
+from pygments.lexers.pascal import DelphiLexer, PortugolLexer, Modula2Lexer
+from pygments.lexers.ada import AdaLexer
+from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
+from pygments.lexers.fortran import FortranLexer
+from pygments.lexers.prolog import PrologLexer
+from pygments.lexers.python import CythonLexer
+from pygments.lexers.graphics import GLShaderLexer
+from pygments.lexers.ml import OcamlLexer
+from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
+from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
+from pygments.lexers.ooc import OocLexer
+from pygments.lexers.felix import FelixLexer
+from pygments.lexers.nimrod import NimrodLexer
+from pygments.lexers.crystal import CrystalLexer
+
+__all__ = []
diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py
new file mode 100644
index 0000000..e04c722
--- /dev/null
+++ b/pygments/lexers/configs.py
@@ -0,0 +1,1174 @@
+"""
+ pygments.lexers.configs
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for configuration file formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, RegexLexer, default, words, \
+ bygroups, include, using, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace, Literal, Error, Generic
+from pygments.lexers.shell import BashLexer
+from pygments.lexers.data import JsonLexer
+
+__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
+ 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
+ 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
+ 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
+ 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer',
+ 'NestedTextLexer', 'SingularityLexer', 'UnixConfigLexer']
+
+
+class IniLexer(RegexLexer):
+ """
+ Lexer for configuration files in INI style.
+ """
+
+ name = 'INI'
+ aliases = ['ini', 'cfg', 'dosini']
+ filenames = [
+ '*.ini', '*.cfg', '*.inf', '.editorconfig',
+ # systemd unit files
+ # https://www.freedesktop.org/software/systemd/man/systemd.unit.html
+ '*.service', '*.socket', '*.device', '*.mount', '*.automount',
+ '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope',
+ ]
+ mimetypes = ['text/x-ini', 'text/inf']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'[;#].*', Comment.Single),
+ (r'(\[.*?\])([ \t]*)$', bygroups(Keyword, Whitespace)),
+ (r'(.*?)([  \t]*)([=:])([ \t]*)([^;#\n]*)(\\)(\s+)',
+ bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String,
+ Text, Whitespace),
+ "value"),
+ (r'(.*?)([ \t]*)([=:])([  \t]*)([^ ;#\n]*(?: +[^ ;#\n]+)*)',
+ bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String)),
+ # standalone option, supported by some INI parsers
+ (r'(.+?)$', Name.Attribute),
+ ],
+ 'value': [ # line continuation
+ (r'\s+', Whitespace),
+ (r'(\s*)(.*)(\\)([ \t]*)',
+ bygroups(Whitespace, String, Text, Whitespace)),
+ (r'.*$', String, "#pop"),
+ ],
+ }
+
+ def analyse_text(text):
+ npos = text.find('\n')
+ if npos < 3:
+ return False
+ return text[0] == '[' and text[npos-1] == ']'
+
+
+class RegeditLexer(RegexLexer):
+ """
+ Lexer for Windows Registry files produced by regedit.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'reg'
+ url = 'http://en.wikipedia.org/wiki/Windows_Registry#.REG_files'
+ aliases = ['registry']
+ filenames = ['*.reg']
+ mimetypes = ['text/x-windows-registry']
+
+ tokens = {
+ 'root': [
+ (r'Windows Registry Editor.*', Text),
+ (r'\s+', Whitespace),
+ (r'[;#].*', Comment.Single),
+ (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
+ bygroups(Keyword, Operator, Name.Builtin, Keyword)),
+ # String keys, which obey somewhat normal escaping
+ (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
+ bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
+ 'value'),
+ # Bare keys (includes @)
+ (r'(.*?)([ \t]*)(=)([ \t]*)',
+ bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
+ 'value'),
+ ],
+ 'value': [
+ (r'-', Operator, '#pop'), # delete value
+ (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
+ bygroups(Name.Variable, Punctuation, Number), '#pop'),
+ # As far as I know, .reg files do not support line continuation.
+ (r'.+', String, '#pop'),
+ default('#pop'),
+ ]
+ }
+
+ def analyse_text(text):
+ return text.startswith('Windows Registry Editor')
+
+
+class PropertiesLexer(RegexLexer):
+ """
+ Lexer for configuration files in Java's properties format.
+
+ Note: trailing whitespace counts as part of the value as per spec
+
+ .. versionadded:: 1.4
+ """
+
+ name = 'Properties'
+ aliases = ['properties', 'jproperties']
+ filenames = ['*.properties']
+ mimetypes = ['text/x-java-properties']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'[!#].*|/{2}.*', Comment.Single),
+ # search for first separator
+ (r'([^\\\n]|\\.)*?(?=[ \f\t=:])', Name.Attribute, "separator"),
+ # empty key
+ (r'.+?$', Name.Attribute),
+ ],
+ 'separator': [
+ # search for line continuation escape
+ (r'([ \f\t]*)([=:]*)([ \f\t]*)(.*(?<!\\)(?:\\{2})*)(\\)(?!\\)$',
+ bygroups(Whitespace, Operator, Whitespace, String, Text), "value", "#pop"),
+ (r'([ \f\t]*)([=:]*)([ \f\t]*)(.*)',
+ bygroups(Whitespace, Operator, Whitespace, String), "#pop"),
+ ],
+ 'value': [ # line continuation
+ (r'\s+', Whitespace),
+ # search for line continuation escape
+ (r'(\s*)(.*(?<!\\)(?:\\{2})*)(\\)(?!\\)([ \t]*)',
+ bygroups(Whitespace, String, Text, Whitespace)),
+ (r'.*$', String, "#pop"),
+ ],
+ }
+
+
+def _rx_indent(level):
+ # Kconfig *always* interprets a tab as 8 spaces, so this is the default.
+ # Edit this if you are in an environment where KconfigLexer gets expanded
+ # input (tabs expanded to spaces) and the expansion tab width is != 8,
+ # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
+ # Value range here is 2 <= {tab_width} <= 8.
+ tab_width = 8
+ # Regex matching a given indentation {level}, assuming that indentation is
+ # a multiple of {tab_width}. In other cases there might be problems.
+ if tab_width == 2:
+ space_repeat = '+'
+ else:
+ space_repeat = '{1,%d}' % (tab_width - 1)
+ if level == 1:
+ level_repeat = ''
+ else:
+ level_repeat = '{%s}' % level
+ return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
+
+
+class KconfigLexer(RegexLexer):
+ """
+ For Linux-style Kconfig files.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'Kconfig'
+ aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
+ # Adjust this if new kconfig file names appear in your environment
+ filenames = ['Kconfig*', '*Config.in*', 'external.in*',
+ 'standard-modules.in']
+ mimetypes = ['text/x-kconfig']
+ # No re.MULTILINE, indentation-aware help text needs line-by-line handling
+ flags = 0
+
+ def call_indent(level):
+ # If indentation >= {level} is detected, enter state 'indent{level}'
+ return (_rx_indent(level), String.Doc, 'indent%s' % level)
+
+ def do_indent(level):
+ # Print paragraphs of indentation level >= {level} as String.Doc,
+ # ignoring blank lines. Then return to 'root' state.
+ return [
+ (_rx_indent(level), String.Doc),
+ (r'\s*\n', Text),
+ default('#pop:2')
+ ]
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'#.*?\n', Comment.Single),
+ (words((
+ 'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
+ 'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
+ 'source', 'prompt', 'select', 'depends on', 'default',
+ 'range', 'option'), suffix=r'\b'),
+ Keyword),
+ (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
+ (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
+ Name.Builtin),
+ (r'[!=&|]', Operator),
+ (r'[()]', Punctuation),
+ (r'[0-9]+', Number.Integer),
+ (r"'(''|[^'])*'", String.Single),
+ (r'"(""|[^"])*"', String.Double),
+ (r'\S+', Text),
+ ],
+ # Help text is indented, multi-line and ends when a lower indentation
+ # level is detected.
+ 'help': [
+ # Skip blank lines after help token, if any
+ (r'\s*\n', Text),
+ # Determine the first help line's indentation level heuristically(!).
+ # Attention: this is not perfect, but works for 99% of "normal"
+ # indentation schemes up to a max. indentation level of 7.
+ call_indent(7),
+ call_indent(6),
+ call_indent(5),
+ call_indent(4),
+ call_indent(3),
+ call_indent(2),
+ call_indent(1),
+ default('#pop'), # for incomplete help sections without text
+ ],
+ # Handle text for indentation levels 7 to 1
+ 'indent7': do_indent(7),
+ 'indent6': do_indent(6),
+ 'indent5': do_indent(5),
+ 'indent4': do_indent(4),
+ 'indent3': do_indent(3),
+ 'indent2': do_indent(2),
+ 'indent1': do_indent(1),
+ }
+
+
+class Cfengine3Lexer(RegexLexer):
+ """
+ Lexer for CFEngine3 policy files.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'CFEngine3'
+ url = 'http://cfengine.org'
+ aliases = ['cfengine3', 'cf3']
+ filenames = ['*.cf']
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ (r'#.*?\n', Comment),
+ (r'(body)(\s+)(\S+)(\s+)(control)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
+ (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function, Punctuation),
+ 'arglist'),
+ (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
+ (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
+ bygroups(Punctuation, Name.Variable, Punctuation,
+ Whitespace, Keyword.Type, Whitespace, Operator, Whitespace)),
+ (r'(\S+)(\s*)(=>)(\s*)',
+ bygroups(Keyword.Reserved, Whitespace, Operator, Text)),
+ (r'"', String, 'string'),
+ (r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
+ (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
+ (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
+ (r'@[{(][^)}]+[})]', Name.Variable),
+ (r'[(){},;]', Punctuation),
+ (r'=>', Operator),
+ (r'->', Operator),
+ (r'\d+\.\d+', Number.Float),
+ (r'\d+', Number.Integer),
+ (r'\w+', Name.Function),
+ (r'\s+', Whitespace),
+ ],
+ 'string': [
+ (r'\$[{(]', String.Interpol, 'interpol'),
+ (r'\\.', String.Escape),
+ (r'"', String, '#pop'),
+ (r'\n', String),
+ (r'.', String),
+ ],
+ 'interpol': [
+ (r'\$[{(]', String.Interpol, '#push'),
+ (r'[})]', String.Interpol, '#pop'),
+ (r'[^${()}]+', String.Interpol),
+ ],
+ 'arglist': [
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'\w+', Name.Variable),
+ (r'\s+', Whitespace),
+ ],
+ }
+
+
+class ApacheConfLexer(RegexLexer):
+ """
+ Lexer for configuration files following the Apache config file
+ format.
+
+ .. versionadded:: 0.6
+ """
+
+ name = 'ApacheConf'
+ aliases = ['apacheconf', 'aconf', 'apache']
+ filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
+ mimetypes = ['text/x-apacheconf']
+ flags = re.MULTILINE | re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'#(.*\\\n)+.*$|(#.*?)$', Comment),
+ (r'(<[^\s>/][^\s>]*)(?:(\s+)(.*))?(>)',
+ bygroups(Name.Tag, Whitespace, String, Name.Tag)),
+ (r'(</[^\s>]+)(>)',
+ bygroups(Name.Tag, Name.Tag)),
+ (r'[a-z]\w*', Name.Builtin, 'value'),
+ (r'\.+', Text),
+ ],
+ 'value': [
+ (r'\\\n', Text),
+ (r'\n+', Whitespace, '#pop'),
+ (r'\\', Text),
+ (r'[^\S\n]+', Whitespace),
+ (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
+ (r'\d+', Number),
+ (r'/([*a-z0-9][*\w./-]+)', String.Other),
+ (r'(on|off|none|any|all|double|email|dns|min|minimal|'
+ r'os|productonly|full|emerg|alert|crit|error|warn|'
+ r'notice|info|debug|registry|script|inetd|standalone|'
+ r'user|group)\b', Keyword),
+ (r'"([^"\\]*(?:\\(.|\n)[^"\\]*)*)"', String.Double),
+ (r'[^\s"\\]+', Text)
+ ],
+ }
+
+
+class SquidConfLexer(RegexLexer):
+ """
+ Lexer for squid configuration files.
+
+ .. versionadded:: 0.9
+ """
+
+ name = 'SquidConf'
+ url = 'http://www.squid-cache.org/'
+ aliases = ['squidconf', 'squid.conf', 'squid']
+ filenames = ['squid.conf']
+ mimetypes = ['text/x-squidconf']
+ flags = re.IGNORECASE
+
+ keywords = (
+ "access_log", "acl", "always_direct", "announce_host",
+ "announce_period", "announce_port", "announce_to", "anonymize_headers",
+ "append_domain", "as_whois_server", "auth_param_basic",
+ "authenticate_children", "authenticate_program", "authenticate_ttl",
+ "broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
+ "cache_dir", "cache_dns_program", "cache_effective_group",
+ "cache_effective_user", "cache_host", "cache_host_acl",
+ "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
+ "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
+ "cache_peer_access", "cache_replacement_policy", "cache_stoplist",
+ "cache_stoplist_pattern", "cache_store_log", "cache_swap",
+ "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
+ "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
+ "dead_peer_timeout", "debug_options", "delay_access", "delay_class",
+ "delay_initial_bucket_level", "delay_parameters", "delay_pools",
+ "deny_info", "dns_children", "dns_defnames", "dns_nameservers",
+ "dns_testnames", "emulate_httpd_log", "err_html_text",
+ "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
+ "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
+ "ftp_passive", "ftp_user", "half_closed_clients", "header_access",
+ "header_replace", "hierarchy_stoplist", "high_response_time_warning",
+ "high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
+ "http_anonymizer", "httpd_accel", "httpd_accel_host",
+ "httpd_accel_port", "httpd_accel_uses_host_header",
+ "httpd_accel_with_proxy", "http_port", "http_reply_access",
+ "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
+ "ident_lookup", "ident_lookup_access", "ident_timeout",
+ "incoming_http_average", "incoming_icp_average", "inside_firewall",
+ "ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
+ "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
+ "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
+ "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
+ "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
+ "memory_pools_limit", "memory_replacement_policy", "mime_table",
+ "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
+ "minimum_object_size", "minimum_retry_timeout", "miss_access",
+ "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
+ "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
+ "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
+ "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
+ "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
+ "quick_abort", "quick_abort_max", "quick_abort_min",
+ "quick_abort_pct", "range_offset_limit", "read_timeout",
+ "redirect_children", "redirect_program",
+ "redirect_rewrites_host_header", "reference_age",
+ "refresh_pattern", "reload_into_ims", "request_body_max_size",
+ "request_size", "request_timeout", "shutdown_lifetime",
+ "single_parent_bypass", "siteselect_timeout", "snmp_access",
+ "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
+ "store_avg_object_size", "store_objects_per_bucket",
+ "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
+ "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
+ "test_reachability", "udp_hit_obj", "udp_hit_obj_size",
+ "udp_incoming_address", "udp_outgoing_address", "unique_hostname",
+ "unlinkd_program", "uri_whitespace", "useragent_log",
+ "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
+ )
+
+ opts = (
+ "proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
+ "multicast-responder", "on", "off", "all", "deny", "allow", "via",
+ "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
+ "credentialsttl", "none", "disable", "offline_toggle", "diskd",
+ )
+
+ actions = (
+ "shutdown", "info", "parameter", "server_list", "client_list",
+ r'squid.conf',
+ )
+
+ actions_stats = (
+ "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
+ "redirector", "io", "reply_headers", "filedescriptors", "netdb",
+ )
+
+ actions_log = ("status", "enable", "disable", "clear")
+
+ acls = (
+ "url_regex", "urlpath_regex", "referer_regex", "port", "proto",
+ "req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
+ "dst", "time", "dstdomain", "ident", "snmp_community",
+ )
+
+ ip_re = (
+ r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
+ r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
+ r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
+ r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
+ r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
+ r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
+ r'[1-9]?\d)){3}))'
+ )
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'#', Comment, 'comment'),
+ (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
+ (words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
+ # Actions
+ (words(actions, prefix=r'\b', suffix=r'\b'), String),
+ (words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
+ (words(actions_log, prefix=r'log/', suffix=r'='), String),
+ (words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
+ (ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
+ (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
+ (r'\S+', Text),
+ ],
+ 'comment': [
+ (r'\s*TAG:.*', String.Escape, '#pop'),
+ (r'.+', Comment, '#pop'),
+ default('#pop'),
+ ],
+ }
+
+
class NginxConfLexer(RegexLexer):
    """
    Lexer for Nginx configuration files.

    .. versionadded:: 0.11
    """
    name = 'Nginx configuration file'
    url = 'http://nginx.net/'
    aliases = ['nginx']
    filenames = ['nginx.conf']
    mimetypes = ['text/x-nginx-conf']

    # NOTE: rule order inside each state is significant -- earlier rules win.
    tokens = {
        'root': [
            # "include" directive with its file argument
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Whitespace, Name)),
            # any other directive name opens a statement
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            # '}' closes the block *and* the statement that opened it
            (r'\}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'\{', Punctuation, 'block'),
            # ';' terminates the statement
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            # variables such as $host
            (r'\$[^\s;#()]+', Name.Variable),
            # host:port pairs
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String),  # mimetype
            # (r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            # regex argument introduced by '~'
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Whitespace, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String),  # catch all
            (r'/[^\s;#]*', Name),  # pathname
            (r'\s+', Whitespace),
            (r'[$;]', Text),  # leftover characters
        ],
    }
+
+
class LighttpdConfLexer(RegexLexer):
    """
    Lexer for Lighttpd configuration files.

    .. versionadded:: 0.11
    """
    name = 'Lighttpd configuration file'
    url = 'http://lighttpd.net/'
    aliases = ['lighttpd', 'lighty']
    filenames = ['lighttpd.conf']
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name),  # pathname
            # option names, e.g. server.modules
            (r'[a-zA-Z._-]+', Keyword),
            # IPv4 address, optionally with a CIDR suffix
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            # builtin variables such as $HTTP
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            # double-quoted strings with backslash escapes
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Whitespace),
        ],

    }
+
+
class DockerLexer(RegexLexer):
    """
    Lexer for Docker configuration files.

    .. versionadded:: 2.0
    """
    name = 'Docker'
    url = 'http://docker.io'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']

    # Instructions whose argument is plain text (highlighted as String).
    _keywords = (r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)')
    # Instructions whose argument is delegated to the Bash lexer.
    _bash_keywords = (r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)')
    _lb = r'(?:\s*\\?\s*)'  # dockerfile line break regex
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'#.*', Comment),
            # FROM <image> [AS <stage>]
            (r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?',
             bygroups(Keyword, Whitespace, String, Whitespace, Keyword, Whitespace, String)),
            # ONBUILD defers another instruction; payload is shell-lexed
            (r'(ONBUILD)(\s+)(%s)' % (_lb,), bygroups(Keyword, Whitespace, using(BashLexer))),
            # HEALTHCHECK with its --key=value options
            (r'(HEALTHCHECK)(\s+)((%s--\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            # exec (JSON array) form, e.g. CMD ["echo", "hi"]
            (r'(VOLUME|ENTRYPOINT|CMD|SHELL)(\s+)(%s)(\[.*?\])' % (_lb,),
             bygroups(Keyword, Whitespace, using(BashLexer), using(JsonLexer))),
            # key=value form of LABEL/ENV/ARG
            (r'(LABEL|ENV|ARG)(\s+)((%s\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(%s|VOLUME)\b(\s+)(.*)' % (_keywords), bygroups(Keyword, Whitespace, String)),
            (r'(%s)(\s+)' % (_bash_keywords,), bygroups(Keyword, Whitespace)),
            # fallback: whole (possibly backslash-continued) line as shell
            (r'(.*\\\n)*.+', using(BashLexer)),
        ]
    }
+
+
class TerraformLexer(ExtendedRegexLexer):
    """
    Lexer for Terraform ``.tf`` files.

    .. versionadded:: 2.1
    """

    name = 'Terraform'
    url = 'https://www.terraform.io/'
    aliases = ['terraform', 'tf']
    filenames = ['*.tf']
    mimetypes = ['application/x-tf', 'application/x-terraform']

    # Top-level block types, e.g. ``resource "aws_instance" "web" { ... }``.
    classes = ('backend', 'data', 'module', 'output', 'provider',
               'provisioner', 'resource', 'variable')
    classes_re = "({})".format(('|').join(classes))

    types = ('string', 'number', 'bool', 'list', 'tuple', 'map', 'set', 'object', 'null')

    # Built-in function names, grouped as in the Terraform documentation.
    # FIX: 'mix' was a typo -- Terraform's numeric builtin is 'min'.
    numeric_functions = ('abs', 'ceil', 'floor', 'log', 'max',
                         'min', 'parseint', 'pow', 'signum')

    string_functions = ('chomp', 'format', 'formatlist', 'indent',
                        'join', 'lower', 'regex', 'regexall', 'replace',
                        'split', 'strrev', 'substr', 'title', 'trim',
                        'trimprefix', 'trimsuffix', 'trimspace', 'upper'
                        )

    collection_functions = ('alltrue', 'anytrue', 'chunklist', 'coalesce',
                            'coalescelist', 'compact', 'concat', 'contains',
                            'distinct', 'element', 'flatten', 'index', 'keys',
                            'length', 'list', 'lookup', 'map', 'matchkeys',
                            'merge', 'range', 'reverse', 'setintersection',
                            'setproduct', 'setsubtract', 'setunion', 'slice',
                            'sort', 'sum', 'transpose', 'values', 'zipmap'
                            )

    encoding_functions = ('base64decode', 'base64encode', 'base64gzip',
                          'csvdecode', 'jsondecode', 'jsonencode', 'textdecodebase64',
                          'textencodebase64', 'urlencode', 'yamldecode', 'yamlencode')

    filesystem_functions = ('abspath', 'dirname', 'pathexpand', 'basename',
                            'file', 'fileexists', 'fileset', 'filebase64', 'templatefile')

    date_time_functions = ('formatdate', 'timeadd', 'timestamp')

    hash_crypto_functions = ('base64sha256', 'base64sha512', 'bcrypt', 'filebase64sha256',
                             'filebase64sha512', 'filemd5', 'filesha1', 'filesha256', 'filesha512',
                             'md5', 'rsadecrypt', 'sha1', 'sha256', 'sha512', 'uuid', 'uuidv5')

    ip_network_functions = ('cidrhost', 'cidrnetmask', 'cidrsubnet', 'cidrsubnets')

    type_conversion_functions = ('can', 'defaults', 'tobool', 'tolist', 'tomap',
                                 'tonumber', 'toset', 'tostring', 'try')

    builtins = numeric_functions + string_functions + collection_functions + encoding_functions +\
        filesystem_functions + date_time_functions + hash_crypto_functions + ip_network_functions +\
        type_conversion_functions
    builtins_re = "({})".format(('|').join(builtins))

    def heredoc_callback(self, match, ctx):
        # Parse a terraform heredoc
        # match: 1 = <<[-]?, 2 = name 3 = rest of line

        start = match.start(1)
        yield start, Operator, match.group(1)        # <<[-]?
        yield match.start(2), String.Delimiter, match.group(2)  # heredoc name

        ctx.pos = match.start(3)
        ctx.end = match.end(3)
        yield ctx.pos, String.Heredoc, match.group(3)
        ctx.pos = match.end()

        hdname = match.group(2)
        tolerant = True  # leading whitespace is always accepted

        lines = []

        # Scan forward line by line for the terminating delimiter; everything
        # in between is buffered and emitted as heredoc content.
        for match in line_re.finditer(ctx.text, ctx.pos):
            if tolerant:
                check = match.group().strip()
            else:
                check = match.group().rstrip()
            if check == hdname:
                for amatch in lines:
                    yield amatch.start(), String.Heredoc, amatch.group()
                yield match.start(), String.Delimiter, match.group()
                ctx.pos = match.end()
                break
            else:
                lines.append(match)
        else:
            # end of heredoc not found -- error!
            for amatch in lines:
                yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)

    tokens = {
        'root': [
            include('basic'),
            include('whitespace'),

            # Strings
            (r'(".*")', bygroups(String.Double)),

            # Constants
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Name.Constant),

            # Types
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),

            include('identifier'),
            include('punctuation'),
            (r'[0-9]+', Number),
        ],
        'basic': [
            (r'\s*/\*', Comment.Multiline, 'comment'),
            (r'\s*(#|//).*\n', Comment.Single),
            include('whitespace'),

            # e.g. terraform {
            # e.g. egress {
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=?)(\s*)(\{)',
             bygroups(Whitespace, Name.Builtin, Whitespace, Operator, Whitespace, Punctuation)),

            # Assignment with attributes, e.g. something = ...
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=)(\s*)',
             bygroups(Whitespace, Name.Attribute, Whitespace, Operator, Whitespace)),

            # Assignment with environment variables and similar, e.g. "something" = ...
            # or key value assignment, e.g. "SlotName" : ...
            (r'(\s*)("\S+")(\s*)([=:])(\s*)',
             bygroups(Whitespace, Literal.String.Double, Whitespace, Operator, Whitespace)),

            # Functions, e.g. jsonencode(element("value"))
            (builtins_re + r'(\()', bygroups(Name.Function, Punctuation)),

            # List of attributes, e.g. ignore_changes = [last_modified, filename]
            (r'(\[)([a-z_,\s]+)(\])', bygroups(Punctuation, Name.Builtin, Punctuation)),

            # e.g. resource "aws_security_group" "allow_tls" {
            # e.g. backend "consul" {
            (classes_re + r'(\s+)("[0-9a-zA-Z-_]+")?(\s*)("[0-9a-zA-Z-_]+")(\s+)(\{)',
             bygroups(Keyword.Reserved, Whitespace, Name.Class, Whitespace, Name.Variable, Whitespace, Punctuation)),

            # here-doc style delimited strings
            (r'(<<-?)\s*([a-zA-Z_]\w*)(.*?\n)', heredoc_callback),
        ],
        'identifier': [
            (r'\b(var\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)),
            (r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b',
             bygroups(Name.Variable)),
        ],
        'punctuation': [
            (r'[\[\]()\{\},.?:!=]', Punctuation),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),  # nested comments
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],
    }
+
+
class TermcapLexer(RegexLexer):
    """
    Lexer for termcap database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """
    name = 'Termcap'
    aliases = ['termcap']
    filenames = ['termcap', 'termcap.src']
    mimetypes = []

    # NOTE:
    # * multiline with trailing backslash
    # * separator is ':'
    # * to embed colon as data, we must use \072
    # * space after separator is not allowed (maybe)
    tokens = {
        'root': [
            (r'^#.*', Comment),
            # first name on an entry line starts the alias list
            (r'^[^\s#:|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            # ':' ends the alias list and starts capability definitions
            (r':', Punctuation, 'defs'),
            (r'\|', Punctuation),
            (r'[^:|]+', Name.Attribute),
        ],
        'defs': [
            # backslash line continuation
            (r'(\\)(\n[ \t]*)', bygroups(Text, Whitespace)),
            # end of entry: pop both 'defs' and 'names'
            (r'\n[ \t]*', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r':', Punctuation),
            (r'[^\s:=#]+', Name.Class),
        ],
        'data': [
            (r'\\072', Literal),  # escaped colon inside a value
            (r':', Punctuation, '#pop'),
            (r'[^:\\]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
+
+
class TerminfoLexer(RegexLexer):
    """
    Lexer for terminfo database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """
    name = 'Terminfo'
    aliases = ['terminfo']
    filenames = ['terminfo', 'terminfo.src']
    mimetypes = []

    # NOTE:
    # * multiline with leading whitespace
    # * separator is ','
    # * to embed comma as data, we can use \,
    # * space after separator is allowed
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            # first name on an entry line starts the alias list
            (r'^[^\s#,|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            # ',' ends the alias list and starts capability definitions
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), 'defs'),
            (r'\|', Punctuation),
            (r'[^,|]+', Name.Attribute),
        ],
        'defs': [
            # continuation lines are indented
            (r'\n[ \t]+', Whitespace),
            # unindented newline ends the entry: pop 'defs' and 'names'
            (r'\n', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace)),
            (r'[^\s,=#]+', Name.Class),
        ],
        'data': [
            (r'\\[,\\]', Literal),  # escaped comma/backslash inside a value
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), '#pop'),
            (r'[^\\,]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
+
+
class PkgConfigLexer(RegexLexer):
    """
    Lexer for pkg-config
    (see also `manual page <http://linux.die.net/man/1/pkg-config>`_).

    .. versionadded:: 2.1
    """

    name = 'PkgConfig'
    url = 'http://www.freedesktop.org/wiki/Software/pkg-config/'
    aliases = ['pkgconfig']
    filenames = ['*.pc']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*$', Comment.Single),

            # variable definitions
            (r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),

            # keyword lines
            (r'^([\w.]+)(:)',
             bygroups(Name.Tag, Punctuation), 'spvalue'),

            # variable references
            include('interp'),

            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#=:\n.]+', Text),
            (r'.', Text),
        ],
        'interp': [
            # you can escape literal "$" as "$$"
            (r'\$\$', Text),

            # variable references
            (r'\$\{', String.Interpol, 'curly'),
        ],
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r'\w+', Name.Attribute),
        ],
        # value part of a keyword line, e.g. "Libs: -L${libdir} ..."
        'spvalue': [
            include('interp'),

            (r'#.*$', Comment.Single, '#pop'),
            (r'\n', Whitespace, '#pop'),

            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#\n\s]+', Text),
            (r'.', Text),
        ],
    }
+
+
class PacmanConfLexer(RegexLexer):
    """
    Lexer for pacman.conf.

    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf has
    a form without assignment like:

        UseSyslog
        Color
        TotalDownload
        CheckSpace
        VerbosePkgLists

    These are flags to switch on.

    .. versionadded:: 2.1
    """

    name = 'PacmanConf'
    url = 'https://www.archlinux.org/pacman/pacman.conf.5.html'
    aliases = ['pacmanconf']
    filenames = ['pacman.conf']
    mimetypes = []

    tokens = {
        'root': [
            # comment
            (r'#.*$', Comment.Single),

            # section header
            (r'^(\s*)(\[.*?\])(\s*)$', bygroups(Whitespace, Keyword, Whitespace)),

            # variable definitions
            # (Leading space is allowed...)
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Whitespace, Operator)),

            # flags to on
            (r'^(\s*)(\w+)(\s*)$',
             bygroups(Whitespace, Name.Attribute, Whitespace)),

            # built-in special values
            (words((
                '$repo',  # repository
                '$arch',  # architecture
                '%o',     # outfile
                '%u',     # url
                ), suffix=r'\b'),
             Name.Variable),

            # fallback
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }
+
+
class AugeasLexer(RegexLexer):
    """
    Lexer for Augeas.

    .. versionadded:: 2.4
    """
    name = 'Augeas'
    url = 'http://augeas.net'
    aliases = ['augeas']
    filenames = ['*.aug']

    tokens = {
        'root': [
            # module declaration
            (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            # let binding
            (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
            # built-in lens primitives
            (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Whitespace)),
            # typed parameter, e.g. (name:string)
            (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)),
            (r'\(\*', Comment.Multiline, 'comment'),
            (r'[*+\-.;=?|]', Operator),
            (r'[()\[\]{}]', Operator),
            (r'"', String.Double, 'string'),
            (r'\/', String.Regex, 'regex'),
            # qualified names like Module.lens
            (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)),
            # NOTE(review): this catch-all matches any non-newline char, so the
            # whitespace rule below only ever sees newlines -- confirm intended.
            (r'.', Name.Variable),
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'[^"]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'regex': [
            (r'\\.', String.Escape),
            (r'[^/]', String.Regex),
            (r'\/', String.Regex, '#pop'),
        ],
        'comment': [
            # OCaml-style (* ... *) comments, possibly nested via #push
            (r'[^*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[)*]', Comment.Multiline)
        ],
    }
+
+
class TOMLLexer(RegexLexer):
    """
    Lexer for TOML, a simple language
    for config files.

    .. versionadded:: 2.4
    """

    name = 'TOML'
    url = 'https://github.com/toml-lang/toml'
    aliases = ['toml']
    filenames = ['*.toml', 'Pipfile', 'poetry.lock']

    tokens = {
        'root': [
            # Table
            (r'^(\s*)(\[.*?\])$', bygroups(Whitespace, Keyword)),

            # Basics, comments, strings
            (r'[ \t]+', Whitespace),
            (r'\n', Whitespace),
            (r'#.*?$', Comment.Single),
            # Basic string
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Literal string
            (r'\'\'\'(.*)\'\'\'', String),
            (r'\'[^\']*\'', String),
            (r'(true|false)$', Keyword.Constant),
            # bare keys / identifiers
            (r'[a-zA-Z_][\w\-]*', Name),

            # Datetime
            # TODO this needs to be expanded, as TOML is rather flexible:
            # https://github.com/toml-lang/toml#offset-date-time
            (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer),

            # Numbers
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            # Handle +-inf, +-infinity, +-nan
            (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
            (r'[+-]?\d+', Number.Integer),

            # Punctuation
            (r'[]{}:(),;[]', Punctuation),
            (r'\.', Punctuation),

            # Operators
            (r'=', Operator)

        ]
    }
+
class NestedTextLexer(RegexLexer):
    """
    Lexer for NestedText, a human-friendly data
    format.

    .. versionadded:: 2.9
    """

    name = 'NestedText'
    url = 'https://nestedtext.org'
    aliases = ['nestedtext', 'nt']
    filenames = ['*.nt']

    # Template for dict items whose key is wrapped in a quote character;
    # formatted with "'" and '"' below.
    _quoted_dict_item = r'^(\s*)({0})(.*?)({0}: ?)(.*?)(\s*)$'

    tokens = {
        'root': [
            # comment lines
            (r'^(\s*)(#.*?)$', bygroups(Whitespace, Comment)),
            # multiline string item: "> text"
            (r'^(\s*)(>)( ?)(.*?)(\s*)$', bygroups(Whitespace, Punctuation, Whitespace, String, Whitespace)),
            # list item: "- value"
            (r'^(\s*)(-)( ?)(.*?)(\s*)$', bygroups(Whitespace, Punctuation, Whitespace, String, Whitespace)),
            # dict items with quoted keys
            (_quoted_dict_item.format("'"), bygroups(Whitespace, Punctuation, Name, Punctuation, String, Whitespace)),
            (_quoted_dict_item.format('"'), bygroups(Whitespace, Punctuation, Name, Punctuation, String, Whitespace)),
            # plain dict item: "key: value"
            (r'^(\s*)(.*?)(:)( ?)(.*?)(\s*)$', bygroups(Whitespace, Name, Punctuation, Whitespace, String, Whitespace)),
        ],
    }
+
+
class SingularityLexer(RegexLexer):
    """
    Lexer for Singularity definition files.

    .. versionadded:: 2.6
    """

    name = 'Singularity'
    url = 'https://www.sylabs.io/guides/3.0/user-guide/definition_files.html'
    aliases = ['singularity']
    filenames = ['*.def', 'Singularity']
    flags = re.IGNORECASE | re.MULTILINE | re.DOTALL

    # header keywords like "Bootstrap:" / "From:"
    _headers = r'^(\s*)(bootstrap|from|osversion|mirrorurl|include|registry|namespace|includecmd)(:)'
    # %-prefixed sections whose body is shell script
    _section = r'^(%(?:pre|post|setup|environment|help|labels|test|runscript|files|startscript))(\s*)'
    # per-app sections, e.g. %appinstall
    _appsect = r'^(%app(?:install|help|run|labels|env|test|files))(\s*)'

    tokens = {
        'root': [
            (_section, bygroups(Generic.Heading, Whitespace), 'script'),
            (_appsect, bygroups(Generic.Heading, Whitespace), 'script'),
            (_headers, bygroups(Whitespace, Keyword, Text)),
            (r'\s*#.*?\n', Comment),
            (r'\b(([0-9]+\.?[0-9]*)|(\.[0-9]+))\b', Number),
            (r'[ \t]+', Whitespace),
            (r'(?!^\s*%).', Text),
        ],
        'script': [
            # everything up to the next %-section is delegated to BashLexer
            (r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
        ],
    }

    def analyse_text(text):
        """This is a quite simple script file, but there are a few keywords
        which seem unique to this language."""
        result = 0
        if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text, re.IGNORECASE):
            result += 0.5

        # _section[1:] strips the leading '^' so the search works anywhere
        if re.search(SingularityLexer._section[1:], text):
            result += 0.49

        return result
+
+
class UnixConfigLexer(RegexLexer):
    """
    Lexer for Unix/Linux config files using colon-separated values, e.g.

    * ``/etc/group``
    * ``/etc/passwd``
    * ``/etc/shadow``

    .. versionadded:: 2.12
    """

    name = 'Unix/Linux config files'
    aliases = ['unixconfig', 'linuxconfig']
    filenames = []

    tokens = {
        'root': [
            (r'^#.*', Comment),
            (r'\n', Whitespace),
            # field separator
            (r':', Punctuation),
            (r'[0-9]+', Number),
            # longer word-like fields (names, descriptions)
            (r'((?!\n)[a-zA-Z0-9\_\-\s\(\),]){2,}', Text),
            # anything else up to the next separator
            (r'[^:\n]+', String),
        ],
    }
diff --git a/pygments/lexers/console.py b/pygments/lexers/console.py
new file mode 100644
index 0000000..d4c0f0e
--- /dev/null
+++ b/pygments/lexers/console.py
@@ -0,0 +1,114 @@
+"""
+ pygments.lexers.console
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for misc console output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Generic, Comment, String, Text, Keyword, Name, \
+ Punctuation, Number, Whitespace
+
+__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer']
+
+
class VCTreeStatusLexer(RegexLexer):
    """
    For colorizing output of version control status commands, like "hg
    status" or "svn status".

    .. versionadded:: 2.0
    """
    name = 'VCTreeStatus'
    aliases = ['vctreestatus']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            # added with conflict
            (r'^A \+ C\s+', Generic.Error),
            # added (optionally with history '+')
            (r'^A\s+\+?\s+', String),
            # modified
            (r'^M\s+', Generic.Inserted),
            # conflicted
            (r'^C\s+', Generic.Error),
            # deleted
            (r'^D\s+', Generic.Deleted),
            # unversioned / missing
            (r'^[?!]\s+', Comment.Preproc),
            (r' >\s+.*\n', Comment.Preproc),
            (r'\S+', Text),
            (r'\s+', Whitespace),
        ]
    }
+
+
class PyPyLogLexer(RegexLexer):
    """
    Lexer for PyPy log files.

    .. versionadded:: 1.5
    """
    name = "PyPy Log"
    aliases = ["pypylog", "pypy"]
    filenames = ["*.pypylog"]
    mimetypes = ['application/x-pypylog']

    tokens = {
        "root": [
            # section openers push a dedicated state
            (r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"),
            (r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"),
            include("extra-stuff"),
        ],
        "jit-log": [
            # section closer
            (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
            (r"^\+\d+: ", Comment),
            (r"--end of the loop--", Comment),
            # SSA-ish value names: i1, f2, p3, ptr4
            (r"[ifp]\d+", Name),
            (r"ptr\d+", Name),
            (r"(\()(\w+(?:\.\w+)?)(\))",
             bygroups(Punctuation, Name.Builtin, Punctuation)),
            (r"[\[\]=,()]", Punctuation),
            (r"(\d+\.\d+|inf|-inf)", Number.Float),
            (r"-?\d+", Number.Integer),
            (r"'.*'", String),
            (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
            (r"<.*?>+", Name.Builtin),
            (r"(label|debug_merge_point|jump|finish)", Name.Class),
            # known JIT operation names (alternation ordered so longer names
            # like int_add_ovf are tried before their prefixes like int_add)
            (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
             r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
             r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
             r"int_is_true|"
             r"uint_floordiv|uint_ge|uint_lt|"
             r"float_add|float_sub|float_mul|float_truediv|float_neg|"
             r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
             r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
             r"cast_int_to_float|cast_float_to_int|"
             r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
             r"virtual_ref|mark_opaque_ptr|"
             r"call_may_force|call_assembler|call_loopinvariant|"
             r"call_release_gil|call_pure|call|"
             r"new_with_vtable|new_array|newstr|newunicode|new|"
             r"arraylen_gc|"
             r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
             r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
             r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
             r"getfield_raw|setfield_gc|setfield_raw|"
             r"strgetitem|strsetitem|strlen|copystrcontent|"
             r"unicodegetitem|unicodesetitem|unicodelen|"
             r"guard_true|guard_false|guard_value|guard_isnull|"
             r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
             r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
             Name.Builtin),
            include("extra-stuff"),
        ],
        "jit-backend-counts": [
            # section closer
            (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
            (r":", Punctuation),
            (r"\d+", Number),
            include("extra-stuff"),
        ],
        "extra-stuff": [
            (r"\s+", Whitespace),
            (r"#.*?$", Comment),
        ],
    }
diff --git a/pygments/lexers/cplint.py b/pygments/lexers/cplint.py
new file mode 100644
index 0000000..9ef3c9b
--- /dev/null
+++ b/pygments/lexers/cplint.py
@@ -0,0 +1,44 @@
+"""
+ pygments.lexers.cplint
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the cplint language
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import bygroups, inherit, words
+from pygments.lexers import PrologLexer
+from pygments.token import Operator, Keyword, Name, String, Punctuation
+
+__all__ = ['CplintLexer']
+
+
class CplintLexer(PrologLexer):
    """
    Lexer for cplint files, including CP-logic, Logic Programs with Annotated
    Disjunctions, Distributional Clauses syntax, ProbLog, DTProbLog.

    .. versionadded:: 2.12
    """
    name = 'cplint'
    url = 'https://cplint.eu'
    aliases = ['cplint']
    filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl']
    mimetypes = ['text/x-cplint']

    tokens = {
        'root': [
            (r'map_query', Keyword),
            # probability distribution builtins
            (words(('gaussian', 'uniform_dens', 'dirichlet', 'gamma', 'beta',
                    'poisson', 'binomial', 'geometric', 'exponential', 'pascal',
                    'multinomial', 'user', 'val', 'uniform', 'discrete',
                    'finite')), Name.Builtin),
            # annotations of atoms
            (r'([a-z]+)(:)', bygroups(String.Atom, Punctuation)),
            # cplint-specific operators on top of plain Prolog
            (r':(-|=)|::?|~=?|=>', Operator),
            (r'\?', Name.Builtin),
            # everything else falls back to the Prolog rules
            inherit,
        ],
    }
diff --git a/pygments/lexers/crystal.py b/pygments/lexers/crystal.py
new file mode 100644
index 0000000..2a0238e
--- /dev/null
+++ b/pygments/lexers/crystal.py
@@ -0,0 +1,365 @@
+"""
+ pygments.lexers.crystal
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Crystal.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, include, bygroups, default, \
+ words, line_re
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
+ Punctuation, Error, Whitespace
+
+__all__ = ['CrystalLexer']
+
+
+CRYSTAL_OPERATORS = [
+ '!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
+ '===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
+]
+
+
+class CrystalLexer(ExtendedRegexLexer):
+ """
+ For Crystal source code.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'Crystal'
+ url = 'http://crystal-lang.org'
+ aliases = ['cr', 'crystal']
+ filenames = ['*.cr']
+ mimetypes = ['text/x-crystal']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ def heredoc_callback(self, match, ctx):
+ # okay, this is the hardest part of parsing Crystal...
+ # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
+
+ start = match.start(1)
+ yield start, Operator, match.group(1) # <<-?
+ yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
+ yield match.start(3), String.Delimiter, match.group(3) # heredoc name
+ yield match.start(4), String.Heredoc, match.group(4) # quote again
+
+ heredocstack = ctx.__dict__.setdefault('heredocstack', [])
+ outermost = not bool(heredocstack)
+ heredocstack.append((match.group(1) == '<<-', match.group(3)))
+
+ ctx.pos = match.start(5)
+ ctx.end = match.end(5)
+ # this may find other heredocs, so limit the recursion depth
+ if len(heredocstack) < 100:
+ yield from self.get_tokens_unprocessed(context=ctx)
+ else:
+ yield ctx.pos, String.Heredoc, match.group(5)
+ ctx.pos = match.end()
+
+ if outermost:
+ # this is the outer heredoc again, now we can process them all
+ for tolerant, hdname in heredocstack:
+ lines = []
+ for match in line_re.finditer(ctx.text, ctx.pos):
+ if tolerant:
+ check = match.group().strip()
+ else:
+ check = match.group().rstrip()
+ if check == hdname:
+ for amatch in lines:
+ yield amatch.start(), String.Heredoc, amatch.group()
+ yield match.start(), String.Delimiter, match.group()
+ ctx.pos = match.end()
+ break
+ else:
+ lines.append(match)
+ else:
+ # end of heredoc not found -- error!
+ for amatch in lines:
+ yield amatch.start(), Error, amatch.group()
+ ctx.end = len(ctx.text)
+ del heredocstack[:]
+
+ def gen_crystalstrings_rules():
+ states = {}
+ states['strings'] = [
+ (r'\:\w+[!?]?', String.Symbol),
+ (words(CRYSTAL_OPERATORS, prefix=r'\:'), String.Symbol),
+ (r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
+ # This allows arbitrary text after '\ for simplicity
+ (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
+ (r':"', String.Symbol, 'simple-sym'),
+ # Crystal doesn't have "symbol:"s but this simplifies function args
+ (r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
+ (r'"', String.Double, 'simple-string'),
+ (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
+ ]
+
+ # double-quoted string and symbol
+ for name, ttype, end in ('string', String.Double, '"'), \
+ ('sym', String.Symbol, '"'), \
+ ('backtick', String.Backtick, '`'):
+ states['simple-'+name] = [
+ include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
+ (r'[^\\%s#]+' % end, ttype),
+ (r'[\\#]', ttype),
+ (end, ttype, '#pop'),
+ ]
+
+ # https://crystal-lang.org/docs/syntax_and_semantics/literals/string.html#percent-string-literals
+ for lbrace, rbrace, bracecc, name in \
+ ('\\{', '\\}', '{}', 'cb'), \
+ ('\\[', '\\]', '\\[\\]', 'sb'), \
+ ('\\(', '\\)', '()', 'pa'), \
+ ('<', '>', '<>', 'ab'), \
+ ('\\|', '\\|', '\\|', 'pi'):
+ states[name+'-intp-string'] = [
+ (r'\\' + lbrace, String.Other),
+ ] + (lbrace != rbrace) * [
+ (lbrace, String.Other, '#push'),
+ ] + [
+ (rbrace, String.Other, '#pop'),
+ include('string-intp-escaped'),
+ (r'[\\#' + bracecc + ']', String.Other),
+ (r'[^\\#' + bracecc + ']+', String.Other),
+ ]
+ states['strings'].append((r'%Q?' + lbrace, String.Other,
+ name+'-intp-string'))
+ states[name+'-string'] = [
+ (r'\\[\\' + bracecc + ']', String.Other),
+ ] + (lbrace != rbrace) * [
+ (lbrace, String.Other, '#push'),
+ ] + [
+ (rbrace, String.Other, '#pop'),
+ (r'[\\#' + bracecc + ']', String.Other),
+ (r'[^\\#' + bracecc + ']+', String.Other),
+ ]
+ # https://crystal-lang.org/docs/syntax_and_semantics/literals/array.html#percent-array-literals
+ states['strings'].append((r'%[qwi]' + lbrace, String.Other,
+ name+'-string'))
+ states[name+'-regex'] = [
+ (r'\\[\\' + bracecc + ']', String.Regex),
+ ] + (lbrace != rbrace) * [
+ (lbrace, String.Regex, '#push'),
+ ] + [
+ (rbrace + '[imsx]*', String.Regex, '#pop'),
+ include('string-intp'),
+ (r'[\\#' + bracecc + ']', String.Regex),
+ (r'[^\\#' + bracecc + ']+', String.Regex),
+ ]
+ states['strings'].append((r'%r' + lbrace, String.Regex,
+ name+'-regex'))
+
+ return states
+
+ tokens = {
+ 'root': [
+ (r'#.*?$', Comment.Single),
+ # keywords
+ (words('''
+ abstract asm begin break case do else elsif end ensure extend if in
+ include next of private protected require rescue return select self super
+ then unless until when while with yield
+ '''.split(), suffix=r'\b'), Keyword),
+ (words('''
+ previous_def forall out uninitialized __DIR__ __FILE__ __LINE__
+ __END_LINE__
+ '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
+ # https://crystal-lang.org/docs/syntax_and_semantics/is_a.html
+ (r'\.(is_a\?|nil\?|responds_to\?|as\?|as\b)', Keyword.Pseudo),
+ (words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
+ # start of function, class and module names
+ (r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
+ bygroups(Keyword, Whitespace, Name.Namespace)),
+ (r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
+ bygroups(Keyword, Whitespace, Name.Namespace), 'funcname'),
+ (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
+ (r'(annotation|class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
+ bygroups(Keyword, Whitespace, Name.Namespace), 'classname'),
+ # https://crystal-lang.org/api/toplevel.html
+ (words('''
+ instance_sizeof offsetof pointerof sizeof typeof
+ '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
+ # macros
+ (r'(?<!\.)(debugger\b|p!|pp!|record\b|spawn\b)', Name.Builtin.Pseudo),
+ # builtins
+ (words('''
+ abort at_exit caller exit gets loop main p pp print printf puts
+ raise rand read_line sleep spawn sprintf system
+ '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
+ # https://crystal-lang.org/api/Object.html#macro-summary
+ (r'(?<!\.)(((class_)?((getter|property)\b[!?]?|setter\b))|'
+ r'(def_(clone|equals|equals_and_hash|hash)|delegate|forward_missing_to)\b)',
+ Name.Builtin.Pseudo),
+ # normal heredocs
+ (r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
+ heredoc_callback),
+ # empty string heredocs
+ (r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
+ (r'__END__', Comment.Preproc, 'end-part'),
+ # multiline regex (after keywords or assignments)
+ (r'(?:^|(?<=[=<>~!:])|'
+ r'(?<=(?:\s|;)when\s)|'
+ r'(?<=(?:\s|;)or\s)|'
+ r'(?<=(?:\s|;)and\s)|'
+ r'(?<=\.index\s)|'
+ r'(?<=\.scan\s)|'
+ r'(?<=\.sub\s)|'
+ r'(?<=\.sub!\s)|'
+ r'(?<=\.gsub\s)|'
+ r'(?<=\.gsub!\s)|'
+ r'(?<=\.match\s)|'
+ r'(?<=(?:\s|;)if\s)|'
+ r'(?<=(?:\s|;)elsif\s)|'
+ r'(?<=^when\s)|'
+ r'(?<=^index\s)|'
+ r'(?<=^scan\s)|'
+ r'(?<=^sub\s)|'
+ r'(?<=^gsub\s)|'
+ r'(?<=^sub!\s)|'
+ r'(?<=^gsub!\s)|'
+ r'(?<=^match\s)|'
+ r'(?<=^if\s)|'
+ r'(?<=^elsif\s)'
+ r')(\s*)(/)', bygroups(Whitespace, String.Regex), 'multiline-regex'),
+ # multiline regex (in method calls or subscripts)
+ (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
+ # multiline regex (this time the funny no whitespace rule)
+ (r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
+ 'multiline-regex'),
+            # lex numbers and ignore a following regular expression, which
+            # is in fact a division operator ("/" after a number is
+            # ambiguous, so we disambiguate it here)
+ # since pygments 0.7 we also eat a "?" operator after numbers
+ # so that the char operator does not work. Chars are not allowed
+ # there so that you can use the ternary operator.
+ # stupid example:
+ # x>=0?n[x]:""
+ (r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
+ bygroups(Number.Oct, Whitespace, Operator)),
+ (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
+ bygroups(Number.Hex, Whitespace, Operator)),
+ (r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
+ bygroups(Number.Bin, Whitespace, Operator)),
+ # 3 separate expressions for floats because any of the 3 optional
+ # parts makes it a float
+ (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
+ r'(?:_?f[0-9]+)?)(\s*)([/?])?',
+ bygroups(Number.Float, Whitespace, Operator)),
+ (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
+ r'(?:_?f[0-9]+)?)(\s*)([/?])?',
+ bygroups(Number.Float, Whitespace, Operator)),
+ (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
+ r'(?:_?f[0-9]+))(\s*)([/?])?',
+ bygroups(Number.Float, Whitespace, Operator)),
+ (r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
+ bygroups(Number.Integer, Whitespace, Operator)),
+ # Names
+ (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
+ (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
+ (r'\$\w+', Name.Variable.Global),
+ (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
+ (r'\$-[0adFiIlpvw]', Name.Variable.Global),
+ (r'::', Operator),
+ include('strings'),
+ # https://crystal-lang.org/reference/syntax_and_semantics/literals/char.html
+ (r'\?(\\[MC]-)*' # modifiers
+ r'(\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})|\S)'
+ r'(?!\w)',
+ String.Char),
+ (r'[A-Z][A-Z_]+\b(?!::|\.)', Name.Constant),
+ # macro expansion
+ (r'\{%', String.Interpol, 'in-macro-control'),
+ (r'\{\{', String.Interpol, 'in-macro-expr'),
+ # annotations
+ (r'(@\[)(\s*)([A-Z]\w*(::[A-Z]\w*)*)',
+ bygroups(Operator, Whitespace, Name.Decorator), 'in-annot'),
+            # this is needed because Crystal method names after . or :: can
+            # look like keywords (e.g. class) or like operator symbols (`, ?, !)
+ (words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
+ bygroups(Operator, Name.Operator)),
+ (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
+ bygroups(Operator, Name)),
+ # Names can end with [!?] unless it's "!="
+ (r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
+ (r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
+ r'!~|&&?|\|\||\.{1,3})', Operator),
+ (r'[-+/*%=<>&!^|~]=?', Operator),
+ (r'[(){};,/?:\\]', Punctuation),
+ (r'\s+', Whitespace)
+ ],
+ 'funcname': [
+ (r'(?:([a-zA-Z_]\w*)(\.))?'
+ r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
+ r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
+ bygroups(Name.Class, Operator, Name.Function), '#pop'),
+ default('#pop')
+ ],
+ 'classname': [
+ (r'[A-Z_]\w*', Name.Class),
+ (r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
+ bygroups(Punctuation, Whitespace, Name.Class, Whitespace, Punctuation)),
+ default('#pop')
+ ],
+ 'in-intp': [
+ (r'\{', String.Interpol, '#push'),
+ (r'\}', String.Interpol, '#pop'),
+ include('root'),
+ ],
+ 'string-intp': [
+ (r'#\{', String.Interpol, 'in-intp'),
+ ],
+ 'string-escaped': [
+ # https://crystal-lang.org/reference/syntax_and_semantics/literals/string.html
+ (r'\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})',
+ String.Escape)
+ ],
+ 'string-intp-escaped': [
+ include('string-intp'),
+ include('string-escaped'),
+ ],
+ 'interpolated-regex': [
+ include('string-intp'),
+ (r'[\\#]', String.Regex),
+ (r'[^\\#]+', String.Regex),
+ ],
+ 'interpolated-string': [
+ include('string-intp'),
+ (r'[\\#]', String.Other),
+ (r'[^\\#]+', String.Other),
+ ],
+ 'multiline-regex': [
+ include('string-intp'),
+ (r'\\\\', String.Regex),
+ (r'\\/', String.Regex),
+ (r'[\\#]', String.Regex),
+ (r'[^\\/#]+', String.Regex),
+ (r'/[imsx]*', String.Regex, '#pop'),
+ ],
+ 'end-part': [
+ (r'.+', Comment.Preproc, '#pop')
+ ],
+ 'in-macro-control': [
+ (r'\{%', String.Interpol, '#push'),
+ (r'%\}', String.Interpol, '#pop'),
+ (r'(for|verbatim)\b', Keyword),
+ include('root'),
+ ],
+ 'in-macro-expr': [
+ (r'\{\{', String.Interpol, '#push'),
+ (r'\}\}', String.Interpol, '#pop'),
+ include('root'),
+ ],
+ 'in-annot': [
+ (r'\[', Operator, '#push'),
+ (r'\]', Operator, '#pop'),
+ include('root'),
+ ],
+ }
+ tokens.update(gen_crystalstrings_rules())
diff --git a/pygments/lexers/csound.py b/pygments/lexers/csound.py
new file mode 100644
index 0000000..014a6f5
--- /dev/null
+++ b/pygments/lexers/csound.py
@@ -0,0 +1,468 @@
+"""
+ pygments.lexers.csound
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Csound languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, default, include, using, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \
+ String, Text, Whitespace
+from pygments.lexers._csound_builtins import OPCODES, DEPRECATED_OPCODES, REMOVED_OPCODES
+from pygments.lexers.html import HtmlLexer
+from pygments.lexers.python import PythonLexer
+from pygments.lexers.scripting import LuaLexer
+
+__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']
+
+newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text))
+
+
+class CsoundLexer(RegexLexer):
+ url = 'https://csound.com/'
+
+ tokens = {
+ 'whitespace': [
+ (r'[ \t]+', Whitespace),
+ (r'/[*](?:.|\n)*?[*]/', Comment.Multiline),
+ (r'(?:;|//).*$', Comment.Single),
+ (r'(\\)(\n)', bygroups(Text, Whitespace))
+ ],
+
+ 'preprocessor directives': [
+ (r'#(?:e(?:nd(?:if)?|lse)\b|##)|@@?[ \t]*\d+', Comment.Preproc),
+ (r'#includestr', Comment.Preproc, 'includestr directive'),
+ (r'#include', Comment.Preproc, 'include directive'),
+ (r'#[ \t]*define', Comment.Preproc, 'define directive'),
+ (r'#(?:ifn?def|undef)\b', Comment.Preproc, 'macro directive')
+ ],
+
+ 'include directive': [
+ include('whitespace'),
+ (r'([^ \t]).*?\1', String, '#pop')
+ ],
+ 'includestr directive': [
+ include('whitespace'),
+ (r'"', String, ('#pop', 'quoted string'))
+ ],
+
+ 'define directive': [
+ (r'\n', Whitespace),
+ include('whitespace'),
+ (r'([A-Z_a-z]\w*)(\()', bygroups(Comment.Preproc, Punctuation),
+ ('#pop', 'macro parameter name list')),
+ (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'before macro body'))
+ ],
+ 'macro parameter name list': [
+ include('whitespace'),
+ (r'[A-Z_a-z]\w*', Comment.Preproc),
+ (r"['#]", Punctuation),
+ (r'\)', Punctuation, ('#pop', 'before macro body'))
+ ],
+ 'before macro body': [
+ (r'\n', Whitespace),
+ include('whitespace'),
+ (r'#', Punctuation, ('#pop', 'macro body'))
+ ],
+ 'macro body': [
+ (r'(?:\\(?!#)|[^#\\]|\n)+', Comment.Preproc),
+ (r'\\#', Comment.Preproc),
+ (r'(?<!\\)#', Punctuation, '#pop')
+ ],
+
+ 'macro directive': [
+ include('whitespace'),
+ (r'[A-Z_a-z]\w*', Comment.Preproc, '#pop')
+ ],
+
+ 'macro uses': [
+ (r'(\$[A-Z_a-z]\w*\.?)(\()', bygroups(Comment.Preproc, Punctuation),
+ 'macro parameter value list'),
+ (r'\$[A-Z_a-z]\w*(?:\.|\b)', Comment.Preproc)
+ ],
+ 'macro parameter value list': [
+ (r'(?:[^\'#"{()]|\{(?!\{))+', Comment.Preproc),
+ (r"['#]", Punctuation),
+ (r'"', String, 'macro parameter value quoted string'),
+ (r'\{\{', String, 'macro parameter value braced string'),
+ (r'\(', Comment.Preproc, 'macro parameter value parenthetical'),
+ (r'\)', Punctuation, '#pop')
+ ],
+ 'macro parameter value quoted string': [
+ (r"\\[#'()]", Comment.Preproc),
+ (r"[#'()]", Error),
+ include('quoted string')
+ ],
+ 'macro parameter value braced string': [
+ (r"\\[#'()]", Comment.Preproc),
+ (r"[#'()]", Error),
+ include('braced string')
+ ],
+ 'macro parameter value parenthetical': [
+ (r'(?:[^\\()]|\\\))+', Comment.Preproc),
+ (r'\(', Comment.Preproc, '#push'),
+ (r'\)', Comment.Preproc, '#pop')
+ ],
+
+ 'whitespace and macro uses': [
+ include('whitespace'),
+ include('macro uses')
+ ],
+
+ 'numbers': [
+ (r'\d+[Ee][+-]?\d+|(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
+ (r'(0[Xx])([0-9A-Fa-f]+)', bygroups(Keyword.Type, Number.Hex)),
+ (r'\d+', Number.Integer)
+ ],
+
+ 'quoted string': [
+ (r'"', String, '#pop'),
+ (r'[^"$]+', String),
+ include('macro uses'),
+ (r'[$]', String)
+ ],
+
+ 'braced string': [
+ # Do nothing. This must be defined in subclasses.
+ ]
+ }
+
+
+class CsoundScoreLexer(CsoundLexer):
+ """
+ For `Csound <https://csound.com>`_ scores.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'Csound Score'
+ aliases = ['csound-score', 'csound-sco']
+ filenames = ['*.sco']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ include('whitespace and macro uses'),
+ include('preprocessor directives'),
+
+ (r'[aBbCdefiqstvxy]', Keyword),
+ # There is also a w statement that is generated internally and should not be
+ # used; see https://github.com/csound/csound/issues/750.
+
+ (r'z', Keyword.Constant),
+ # z is a constant equal to 800,000,000,000. 800 billion seconds is about
+ # 25,367.8 years. See also
+ # https://csound.com/docs/manual/ScoreTop.html and
+ # https://github.com/csound/csound/search?q=stof+path%3AEngine+filename%3Asread.c.
+
+ (r'([nNpP][pP])(\d+)', bygroups(Keyword, Number.Integer)),
+
+ (r'[mn]', Keyword, 'mark statement'),
+
+ include('numbers'),
+ (r'[!+\-*/^%&|<>#~.]', Operator),
+ (r'[()\[\]]', Punctuation),
+ (r'"', String, 'quoted string'),
+ (r'\{', Comment.Preproc, 'loop after left brace'),
+ ],
+
+ 'mark statement': [
+ include('whitespace and macro uses'),
+ (r'[A-Z_a-z]\w*', Name.Label),
+ (r'\n', Whitespace, '#pop')
+ ],
+
+ 'loop after left brace': [
+ include('whitespace and macro uses'),
+ (r'\d+', Number.Integer, ('#pop', 'loop after repeat count')),
+ ],
+ 'loop after repeat count': [
+ include('whitespace and macro uses'),
+ (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'loop'))
+ ],
+ 'loop': [
+ (r'\}', Comment.Preproc, '#pop'),
+ include('root')
+ ],
+
+ # Braced strings are not allowed in Csound scores, but this is needed because the
+ # superclass includes it.
+ 'braced string': [
+ (r'\}\}', String, '#pop'),
+ (r'[^}]|\}(?!\})', String)
+ ]
+ }
+
+
+class CsoundOrchestraLexer(CsoundLexer):
+ """
+ For `Csound <https://csound.com>`_ orchestras.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'Csound Orchestra'
+ aliases = ['csound', 'csound-orc']
+ filenames = ['*.orc', '*.udo']
+
+ user_defined_opcodes = set()
+
+ def opcode_name_callback(lexer, match):
+ opcode = match.group(0)
+ lexer.user_defined_opcodes.add(opcode)
+ yield match.start(), Name.Function, opcode
+
+ def name_callback(lexer, match):
+ type_annotation_token = Keyword.Type
+
+ name = match.group(1)
+ if name in OPCODES or name in DEPRECATED_OPCODES or name in REMOVED_OPCODES:
+ yield match.start(), Name.Builtin, name
+ elif name in lexer.user_defined_opcodes:
+ yield match.start(), Name.Function, name
+ else:
+ type_annotation_token = Name
+ name_match = re.search(r'^(g?[afikSw])(\w+)', name)
+ if name_match:
+ yield name_match.start(1), Keyword.Type, name_match.group(1)
+ yield name_match.start(2), Name, name_match.group(2)
+ else:
+ yield match.start(), Name, name
+
+ if match.group(2):
+ yield match.start(2), Punctuation, match.group(2)
+ yield match.start(3), type_annotation_token, match.group(3)
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+
+ (r'^([ \t]*)(\w+)(:)([ \t]+|$)', bygroups(Whitespace, Name.Label, Punctuation, Whitespace)),
+
+ include('whitespace and macro uses'),
+ include('preprocessor directives'),
+
+ (r'\binstr\b', Keyword.Declaration, 'instrument numbers and identifiers'),
+ (r'\bopcode\b', Keyword.Declaration, 'after opcode keyword'),
+ (r'\b(?:end(?:in|op))\b', Keyword.Declaration),
+
+ include('partial statements')
+ ],
+
+ 'partial statements': [
+ (r'\b(?:0dbfs|A4|k(?:r|smps)|nchnls(?:_i)?|sr)\b', Name.Variable.Global),
+
+ include('numbers'),
+
+ (r'\+=|-=|\*=|/=|<<|>>|<=|>=|==|!=|&&|\|\||[~¬]|[=!+\-*/^%&|<>#?:]', Operator),
+ (r'[(),\[\]]', Punctuation),
+
+ (r'"', String, 'quoted string'),
+ (r'\{\{', String, 'braced string'),
+
+ (words((
+ 'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen',
+ 'od', 'then', 'until', 'while',
+ ), prefix=r'\b', suffix=r'\b'), Keyword),
+ (words(('return', 'rireturn'), prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
+
+ (r'\b[ik]?goto\b', Keyword, 'goto label'),
+ (r'\b(r(?:einit|igoto)|tigoto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
+ 'goto label'),
+ (r'\b(c(?:g|in?|k|nk?)goto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
+ ('goto label', 'goto argument')),
+ (r'\b(timout)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
+ ('goto label', 'goto argument', 'goto argument')),
+ (r'\b(loop_[gl][et])(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
+ ('goto label', 'goto argument', 'goto argument', 'goto argument')),
+
+ (r'\bprintk?s\b', Name.Builtin, 'prints opcode'),
+ (r'\b(?:readscore|scoreline(?:_i)?)\b', Name.Builtin, 'Csound score opcode'),
+ (r'\bpyl?run[it]?\b', Name.Builtin, 'Python opcode'),
+ (r'\blua_(?:exec|opdef)\b', Name.Builtin, 'Lua opcode'),
+ (r'\bp\d+\b', Name.Variable.Instance),
+ (r'\b([A-Z_a-z]\w*)(?:(:)([A-Za-z]))?\b', name_callback)
+ ],
+
+ 'instrument numbers and identifiers': [
+ include('whitespace and macro uses'),
+ (r'\d+|[A-Z_a-z]\w*', Name.Function),
+ (r'[+,]', Punctuation),
+ (r'\n', Whitespace, '#pop')
+ ],
+
+ 'after opcode keyword': [
+ include('whitespace and macro uses'),
+ (r'[A-Z_a-z]\w*', opcode_name_callback, ('#pop', 'opcode type signatures')),
+ (r'\n', Whitespace, '#pop')
+ ],
+ 'opcode type signatures': [
+ include('whitespace and macro uses'),
+
+ # https://github.com/csound/csound/search?q=XIDENT+path%3AEngine+filename%3Acsound_orc.lex
+ (r'0|[afijkKoOpPStV\[\]]+', Keyword.Type),
+
+ (r',', Punctuation),
+ (r'\n', Whitespace, '#pop')
+ ],
+
+ 'quoted string': [
+ (r'"', String, '#pop'),
+ (r'[^\\"$%)]+', String),
+ include('macro uses'),
+ include('escape sequences'),
+ include('format specifiers'),
+ (r'[\\$%)]', String)
+ ],
+ 'braced string': [
+ (r'\}\}', String, '#pop'),
+ (r'(?:[^\\%)}]|\}(?!\}))+', String),
+ include('escape sequences'),
+ include('format specifiers'),
+ (r'[\\%)]', String)
+ ],
+ 'escape sequences': [
+ # https://github.com/csound/csound/search?q=unquote_string+path%3AEngine+filename%3Acsound_orc_compile.c
+ (r'\\(?:[\\abnrt"]|[0-7]{1,3})', String.Escape)
+ ],
+ # Format specifiers are highlighted in all strings, even though only
+ # fprintks https://csound.com/docs/manual/fprintks.html
+ # fprints https://csound.com/docs/manual/fprints.html
+ # printf/printf_i https://csound.com/docs/manual/printf.html
+ # printks https://csound.com/docs/manual/printks.html
+ # prints https://csound.com/docs/manual/prints.html
+ # sprintf https://csound.com/docs/manual/sprintf.html
+ # sprintfk https://csound.com/docs/manual/sprintfk.html
+ # work with strings that contain format specifiers. In addition, these opcodes’
+ # handling of format specifiers is inconsistent:
+ # - fprintks and fprints accept %a and %A specifiers, and accept %s specifiers
+ # starting in Csound 6.15.0.
+ # - printks and prints accept %a and %A specifiers, but don’t accept %s
+ # specifiers.
+ # - printf, printf_i, sprintf, and sprintfk don’t accept %a and %A specifiers,
+ # but accept %s specifiers.
+ # See https://github.com/csound/csound/issues/747 for more information.
+ 'format specifiers': [
+ (r'%[#0\- +]*\d*(?:\.\d+)?[AE-GXac-giosux]', String.Interpol),
+ (r'%%', String.Escape)
+ ],
+
+ 'goto argument': [
+ include('whitespace and macro uses'),
+ (r',', Punctuation, '#pop'),
+ include('partial statements')
+ ],
+ 'goto label': [
+ include('whitespace and macro uses'),
+ (r'\w+', Name.Label, '#pop'),
+ default('#pop')
+ ],
+
+ 'prints opcode': [
+ include('whitespace and macro uses'),
+ (r'"', String, 'prints quoted string'),
+ default('#pop')
+ ],
+ 'prints quoted string': [
+ (r'\\\\[aAbBnNrRtT]', String.Escape),
+ (r'%[!nNrRtT]|[~^]{1,2}', String.Escape),
+ include('quoted string')
+ ],
+
+ 'Csound score opcode': [
+ include('whitespace and macro uses'),
+ (r'"', String, 'quoted string'),
+ (r'\{\{', String, 'Csound score'),
+ (r'\n', Whitespace, '#pop')
+ ],
+ 'Csound score': [
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(CsoundScoreLexer))
+ ],
+
+ 'Python opcode': [
+ include('whitespace and macro uses'),
+ (r'"', String, 'quoted string'),
+ (r'\{\{', String, 'Python'),
+ (r'\n', Whitespace, '#pop')
+ ],
+ 'Python': [
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(PythonLexer))
+ ],
+
+ 'Lua opcode': [
+ include('whitespace and macro uses'),
+ (r'"', String, 'quoted string'),
+ (r'\{\{', String, 'Lua'),
+ (r'\n', Whitespace, '#pop')
+ ],
+ 'Lua': [
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(LuaLexer))
+ ]
+ }
+
+
+class CsoundDocumentLexer(RegexLexer):
+ """
+ For `Csound <https://csound.com>`_ documents.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'Csound Document'
+ aliases = ['csound-document', 'csound-csd']
+ filenames = ['*.csd']
+
+ # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making
+ # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a
+ # better idea, since Csound Document files look like XML files. However, Csound
+ # Documents can contain Csound comments (preceded by //, for example) before and
+ # after the root element, unescaped bitwise AND & and less than < operators, etc. In
+ # other words, while Csound Document files look like XML files, they may not actually
+ # be XML files.
+ tokens = {
+ 'root': [
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'(?:;|//).*$', Comment.Single),
+ (r'[^/;<]+|/(?!/)', Text),
+
+ (r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),
+ (r'<\s*CsScore', Name.Tag, ('score', 'tag')),
+ (r'<\s*[Hh][Tt][Mm][Ll]', Name.Tag, ('HTML', 'tag')),
+
+ (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag)
+ ],
+
+ 'orchestra': [
+ (r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
+ ],
+ 'score': [
+ (r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
+ ],
+ 'HTML': [
+ (r'<\s*/\s*[Hh][Tt][Mm][Ll]\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*[Hh][Tt][Mm][Ll]\s*>)', using(HtmlLexer))
+ ],
+
+ 'tag': [
+ (r'\s+', Whitespace),
+ (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
+ (r'/?\s*>', Name.Tag, '#pop')
+ ],
+ 'attr': [
+ (r'\s+', Whitespace),
+ (r'".*?"', String, '#pop'),
+ (r"'.*?'", String, '#pop'),
+ (r'[^\s>]+', String, '#pop')
+ ]
+ }
diff --git a/pygments/lexers/css.py b/pygments/lexers/css.py
new file mode 100644
index 0000000..4d7cb46
--- /dev/null
+++ b/pygments/lexers/css.py
@@ -0,0 +1,602 @@
+"""
+ pygments.lexers.css
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for CSS and related stylesheet formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import copy
+
+from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
+ default, words, inherit
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
+ Punctuation, Whitespace
+from pygments.lexers._css_builtins import _css_properties
+
+__all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer']
+
+
+# List of vendor prefixes obtained from:
+# https://www.w3.org/TR/CSS21/syndata.html#vendor-keyword-history
+_vendor_prefixes = (
+ '-ms-', 'mso-', '-moz-', '-o-', '-xv-', '-atsc-', '-wap-', '-khtml-',
+ '-webkit-', 'prince-', '-ah-', '-hp-', '-ro-', '-rim-', '-tc-',
+)
+
+# List of extended color keywords obtained from:
+# https://drafts.csswg.org/css-color/#named-colors
+_color_keywords = (
+ 'aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige',
+ 'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown',
+ 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral',
+ 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan',
+ 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki',
+ 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred',
+ 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray',
+ 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue',
+ 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite',
+ 'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod',
+ 'gray', 'green', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred',
+ 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen',
+ 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan',
+ 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey',
+ 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue',
+ 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow',
+ 'lime', 'limegreen', 'linen', 'magenta', 'maroon', 'mediumaquamarine',
+ 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen',
+ 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise',
+ 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',
+ 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange',
+ 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise',
+ 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum',
+ 'powderblue', 'purple', 'rebeccapurple', 'red', 'rosybrown', 'royalblue',
+ 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna',
+ 'silver', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow',
+ 'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato', 'turquoise',
+ 'violet', 'wheat', 'white', 'whitesmoke', 'yellow', 'yellowgreen',
+) + ('transparent',)
+
+# List of keyword values obtained from:
+# http://cssvalues.com/
+_keyword_values = (
+ 'absolute', 'alias', 'all', 'all-petite-caps', 'all-scroll',
+ 'all-small-caps', 'allow-end', 'alpha', 'alternate', 'alternate-reverse',
+ 'always', 'armenian', 'auto', 'avoid', 'avoid-column', 'avoid-page',
+ 'backwards', 'balance', 'baseline', 'below', 'blink', 'block', 'bold',
+ 'bolder', 'border-box', 'both', 'bottom', 'box-decoration', 'break-word',
+ 'capitalize', 'cell', 'center', 'circle', 'clip', 'clone', 'close-quote',
+ 'col-resize', 'collapse', 'color', 'color-burn', 'color-dodge', 'column',
+ 'column-reverse', 'compact', 'condensed', 'contain', 'container',
+ 'content-box', 'context-menu', 'copy', 'cover', 'crisp-edges', 'crosshair',
+ 'currentColor', 'cursive', 'darken', 'dashed', 'decimal',
+ 'decimal-leading-zero', 'default', 'descendants', 'difference', 'digits',
+ 'disc', 'distribute', 'dot', 'dotted', 'double', 'double-circle', 'e-resize',
+ 'each-line', 'ease', 'ease-in', 'ease-in-out', 'ease-out', 'edges',
+ 'ellipsis', 'end', 'ew-resize', 'exclusion', 'expanded', 'extra-condensed',
+ 'extra-expanded', 'fantasy', 'fill', 'fill-box', 'filled', 'first', 'fixed',
+ 'flat', 'flex', 'flex-end', 'flex-start', 'flip', 'force-end', 'forwards',
+ 'from-image', 'full-width', 'geometricPrecision', 'georgian', 'groove',
+ 'hanging', 'hard-light', 'help', 'hidden', 'hide', 'horizontal', 'hue',
+ 'icon', 'infinite', 'inherit', 'initial', 'ink', 'inline', 'inline-block',
+ 'inline-flex', 'inline-table', 'inset', 'inside', 'inter-word', 'invert',
+ 'isolate', 'italic', 'justify', 'large', 'larger', 'last', 'left',
+ 'lighten', 'lighter', 'line-through', 'linear', 'list-item', 'local',
+ 'loose', 'lower-alpha', 'lower-greek', 'lower-latin', 'lower-roman',
+ 'lowercase', 'ltr', 'luminance', 'luminosity', 'mandatory', 'manipulation',
+ 'manual', 'margin-box', 'match-parent', 'medium', 'mixed', 'monospace',
+ 'move', 'multiply', 'n-resize', 'ne-resize', 'nesw-resize',
+ 'no-close-quote', 'no-drop', 'no-open-quote', 'no-repeat', 'none', 'normal',
+ 'not-allowed', 'nowrap', 'ns-resize', 'nw-resize', 'nwse-resize', 'objects',
+ 'oblique', 'off', 'on', 'open', 'open-quote', 'optimizeLegibility',
+ 'optimizeSpeed', 'outset', 'outside', 'over', 'overlay', 'overline',
+ 'padding-box', 'page', 'pan-down', 'pan-left', 'pan-right', 'pan-up',
+ 'pan-x', 'pan-y', 'paused', 'petite-caps', 'pixelated', 'pointer',
+ 'preserve-3d', 'progress', 'proximity', 'relative', 'repeat',
+ 'repeat no-repeat', 'repeat-x', 'repeat-y', 'reverse', 'ridge', 'right',
+ 'round', 'row', 'row-resize', 'row-reverse', 'rtl', 'ruby', 'ruby-base',
+ 'ruby-base-container', 'ruby-text', 'ruby-text-container', 'run-in',
+ 'running', 's-resize', 'sans-serif', 'saturation', 'scale-down', 'screen',
+ 'scroll', 'se-resize', 'semi-condensed', 'semi-expanded', 'separate',
+ 'serif', 'sesame', 'show', 'sideways', 'sideways-left', 'sideways-right',
+ 'slice', 'small', 'small-caps', 'smaller', 'smooth', 'snap', 'soft-light',
+ 'solid', 'space', 'space-around', 'space-between', 'spaces', 'square',
+ 'start', 'static', 'step-end', 'step-start', 'sticky', 'stretch', 'strict',
+ 'stroke-box', 'style', 'sw-resize', 'table', 'table-caption', 'table-cell',
+ 'table-column', 'table-column-group', 'table-footer-group',
+ 'table-header-group', 'table-row', 'table-row-group', 'text', 'thick',
+ 'thin', 'titling-caps', 'to', 'top', 'triangle', 'ultra-condensed',
+ 'ultra-expanded', 'under', 'underline', 'unicase', 'unset', 'upper-alpha',
+ 'upper-latin', 'upper-roman', 'uppercase', 'upright', 'use-glyph-orientation',
+ 'vertical', 'vertical-text', 'view-box', 'visible', 'w-resize', 'wait',
+ 'wavy', 'weight', 'weight style', 'wrap', 'wrap-reverse', 'x-large',
+ 'x-small', 'xx-large', 'xx-small', 'zoom-in', 'zoom-out',
+)
+
+# List of other keyword values from other sources:
+_other_keyword_values = (
+ 'above', 'aural', 'behind', 'bidi-override', 'center-left', 'center-right',
+ 'cjk-ideographic', 'continuous', 'crop', 'cross', 'embed', 'far-left',
+ 'far-right', 'fast', 'faster', 'hebrew', 'high', 'higher', 'hiragana',
+ 'hiragana-iroha', 'katakana', 'katakana-iroha', 'landscape', 'left-side',
+ 'leftwards', 'level', 'loud', 'low', 'lower', 'message-box', 'middle',
+ 'mix', 'narrower', 'once', 'portrait', 'right-side', 'rightwards', 'silent',
+ 'slow', 'slower', 'small-caption', 'soft', 'spell-out', 'status-bar',
+ 'super', 'text-bottom', 'text-top', 'wider', 'x-fast', 'x-high', 'x-loud',
+ 'x-low', 'x-soft', 'yes', 'pre', 'pre-wrap', 'pre-line',
+)
+
+# List of functional notation and function keyword values:
+_functional_notation_keyword_values = (
+ 'attr', 'blackness', 'blend', 'blenda', 'blur', 'brightness', 'calc',
+ 'circle', 'color-mod', 'contrast', 'counter', 'cubic-bezier', 'device-cmyk',
+ 'drop-shadow', 'ellipse', 'gray', 'grayscale', 'hsl', 'hsla', 'hue',
+ 'hue-rotate', 'hwb', 'image', 'inset', 'invert', 'lightness',
+ 'linear-gradient', 'matrix', 'matrix3d', 'opacity', 'perspective',
+ 'polygon', 'radial-gradient', 'rect', 'repeating-linear-gradient',
+ 'repeating-radial-gradient', 'rgb', 'rgba', 'rotate', 'rotate3d', 'rotateX',
+ 'rotateY', 'rotateZ', 'saturate', 'saturation', 'scale', 'scale3d',
+ 'scaleX', 'scaleY', 'scaleZ', 'sepia', 'shade', 'skewX', 'skewY', 'steps',
+ 'tint', 'toggle', 'translate', 'translate3d', 'translateX', 'translateY',
+ 'translateZ', 'whiteness',
+)
+# Note! Handle url(...) separately.
+
+# List of units obtained from:
+# https://www.w3.org/TR/css3-values/
+_angle_units = (
+ 'deg', 'grad', 'rad', 'turn',
+)
+_frequency_units = (
+ 'Hz', 'kHz',
+)
+_length_units = (
+ 'em', 'ex', 'ch', 'rem',
+ 'vh', 'vw', 'vmin', 'vmax',
+ 'px', 'mm', 'cm', 'in', 'pt', 'pc', 'q',
+)
+_resolution_units = (
+ 'dpi', 'dpcm', 'dppx',
+)
+_time_units = (
+ 's', 'ms',
+)
+_all_units = _angle_units + _frequency_units + _length_units + \
+ _resolution_units + _time_units
+
+
+class CssLexer(RegexLexer):
+ """
+ For CSS (Cascading Style Sheets).
+ """
+
+ name = 'CSS'
+ url = 'https://www.w3.org/TR/CSS/#css'
+ aliases = ['css']
+ filenames = ['*.css']
+ mimetypes = ['text/css']
+
+ tokens = {
+ 'root': [
+ include('basics'),
+ ],
+ 'basics': [
+ (r'\s+', Whitespace),
+ (r'/\*(?:.|\n)*?\*/', Comment),
+ (r'\{', Punctuation, 'content'),
+ (r'(\:{1,2})([\w-]+)', bygroups(Punctuation, Name.Decorator)),
+ (r'(\.)([\w-]+)', bygroups(Punctuation, Name.Class)),
+ (r'(\#)([\w-]+)', bygroups(Punctuation, Name.Namespace)),
+ (r'(@)([\w-]+)', bygroups(Punctuation, Keyword), 'atrule'),
+ (r'[\w-]+', Name.Tag),
+ (r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ ],
+ 'atrule': [
+ (r'\{', Punctuation, 'atcontent'),
+ (r';', Punctuation, '#pop'),
+ include('basics'),
+ ],
+ 'atcontent': [
+ include('basics'),
+ (r'\}', Punctuation, '#pop:2'),
+ ],
+ 'content': [
+ (r'\s+', Whitespace),
+ (r'\}', Punctuation, '#pop'),
+ (r';', Punctuation),
+ (r'^@.*?$', Comment.Preproc),
+
+ (words(_vendor_prefixes,), Keyword.Pseudo),
+ (r'('+r'|'.join(_css_properties)+r')(\s*)(\:)',
+ bygroups(Keyword, Whitespace, Punctuation), 'value-start'),
+ (r'([-]+[a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name.Variable, Whitespace, Punctuation),
+ 'value-start'),
+ (r'([a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name, Whitespace, Punctuation),
+ 'value-start'),
+
+ (r'/\*(?:.|\n)*?\*/', Comment),
+ ],
+ 'value-start': [
+ (r'\s+', Whitespace),
+ (words(_vendor_prefixes,), Name.Builtin.Pseudo),
+ include('urls'),
+ (r'('+r'|'.join(_functional_notation_keyword_values)+r')(\()',
+ bygroups(Name.Builtin, Punctuation), 'function-start'),
+ (r'([a-zA-Z_][\w-]+)(\()',
+ bygroups(Name.Function, Punctuation), 'function-start'),
+ (words(_keyword_values, suffix=r'\b'), Keyword.Constant),
+ (words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
+ (words(_color_keywords, suffix=r'\b'), Keyword.Constant),
+ # for transition-property etc.
+ (words(_css_properties, suffix=r'\b'), Keyword),
+ (r'\!important', Comment.Preproc),
+ (r'/\*(?:.|\n)*?\*/', Comment),
+
+ include('numeric-values'),
+
+ (r'[~^*!%&<>|+=@:./?-]+', Operator),
+ (r'[\[\](),]+', Punctuation),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'[a-zA-Z_][\w-]*', Name),
+ (r';', Punctuation, '#pop'),
+ (r'\}', Punctuation, '#pop:2'),
+ ],
+ 'function-start': [
+ (r'\s+', Whitespace),
+ (r'[-]+([\w+]+[-]*)+', Name.Variable),
+ include('urls'),
+ (words(_vendor_prefixes,), Keyword.Pseudo),
+ (words(_keyword_values, suffix=r'\b'), Keyword.Constant),
+ (words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
+ (words(_color_keywords, suffix=r'\b'), Keyword.Constant),
+
+ # function-start may be entered recursively
+ (r'(' + r'|'.join(_functional_notation_keyword_values) + r')(\()',
+ bygroups(Name.Builtin, Punctuation), 'function-start'),
+ (r'([a-zA-Z_][\w-]+)(\()',
+ bygroups(Name.Function, Punctuation), 'function-start'),
+
+ (r'/\*(?:.|\n)*?\*/', Comment),
+ include('numeric-values'),
+ (r'[*+/-]', Operator),
+ (r',', Punctuation),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'[a-zA-Z_-]\w*', Name),
+ (r'\)', Punctuation, '#pop'),
+ ],
+ 'urls': [
+ (r'(url)(\()(".*?")(\))', bygroups(Name.Builtin, Punctuation,
+ String.Double, Punctuation)),
+ (r"(url)(\()('.*?')(\))", bygroups(Name.Builtin, Punctuation,
+ String.Single, Punctuation)),
+ (r'(url)(\()(.*?)(\))', bygroups(Name.Builtin, Punctuation,
+ String.Other, Punctuation)),
+ ],
+ 'numeric-values': [
+ (r'\#[a-zA-Z0-9]{1,6}', Number.Hex),
+ (r'[+\-]?[0-9]*[.][0-9]+', Number.Float, 'numeric-end'),
+ (r'[+\-]?[0-9]+', Number.Integer, 'numeric-end'),
+ ],
+ 'numeric-end': [
+ (words(_all_units, suffix=r'\b'), Keyword.Type),
+ (r'%', Keyword.Type),
+ default('#pop'),
+ ],
+ }
+
+
+common_sass_tokens = {
+ 'value': [
+ (r'[ \t]+', Whitespace),
+ (r'[!$][\w-]+', Name.Variable),
+ (r'url\(', String.Other, 'string-url'),
+ (r'[a-z_-][\w-]*(?=\()', Name.Function),
+ (words(_css_properties + (
+ 'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
+ 'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
+ 'capitalize', 'center-left', 'center-right', 'center', 'circle',
+ 'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
+ 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
+ 'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
+ 'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
+ 'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
+ 'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
+ 'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
+ 'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
+ 'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
+ 'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
+ 'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
+ 'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
+ 'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
+ 'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
+ 'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
+ 'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
+ 'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
+ 'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
+ 'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
+ 'table-caption', 'table-cell', 'table-column', 'table-column-group',
+ 'table-footer-group', 'table-header-group', 'table-row',
+ 'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
+ 'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
+ 'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
+ 'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
+ 'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
+ Name.Constant),
+ (words(_color_keywords, suffix=r'\b'), Name.Entity),
+ (words((
+ 'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
+ 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
+ Name.Builtin),
+ (r'\!(important|default)', Name.Exception),
+ (r'(true|false)', Name.Pseudo),
+ (r'(and|or|not)', Operator.Word),
+ (r'/\*', Comment.Multiline, 'inline-comment'),
+ (r'//[^\n]*', Comment.Single),
+ (r'\#[a-z0-9]{1,6}', Number.Hex),
+ (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
+ (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'[~^*!&%<>|+=@:,./?-]+', Operator),
+ (r'[\[\]()]+', Punctuation),
+ (r'"', String.Double, 'string-double'),
+ (r"'", String.Single, 'string-single'),
+ (r'[a-z_-][\w-]*', Name),
+ ],
+
+ 'interpolation': [
+ (r'\}', String.Interpol, '#pop'),
+ include('value'),
+ ],
+
+ 'selector': [
+ (r'[ \t]+', Whitespace),
+ (r'\:', Name.Decorator, 'pseudo-class'),
+ (r'\.', Name.Class, 'class'),
+ (r'\#', Name.Namespace, 'id'),
+ (r'[\w-]+', Name.Tag),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'&', Keyword),
+ (r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
+ (r'"', String.Double, 'string-double'),
+ (r"'", String.Single, 'string-single'),
+ ],
+
+ 'string-double': [
+ (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'"', String.Double, '#pop'),
+ ],
+
+ 'string-single': [
+ (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Single),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r"'", String.Single, '#pop'),
+ ],
+
+ 'string-url': [
+ (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'\)', String.Other, '#pop'),
+ ],
+
+ 'pseudo-class': [
+ (r'[\w-]+', Name.Decorator),
+ (r'#\{', String.Interpol, 'interpolation'),
+ default('#pop'),
+ ],
+
+ 'class': [
+ (r'[\w-]+', Name.Class),
+ (r'#\{', String.Interpol, 'interpolation'),
+ default('#pop'),
+ ],
+
+ 'id': [
+ (r'[\w-]+', Name.Namespace),
+ (r'#\{', String.Interpol, 'interpolation'),
+ default('#pop'),
+ ],
+
+ 'for': [
+ (r'(from|to|through)', Operator.Word),
+ include('value'),
+ ],
+}
+
+
+def _indentation(lexer, match, ctx):
+ indentation = match.group(0)
+ yield match.start(), Whitespace, indentation
+ ctx.last_indentation = indentation
+ ctx.pos = match.end()
+
+ if hasattr(ctx, 'block_state') and ctx.block_state and \
+ indentation.startswith(ctx.block_indentation) and \
+ indentation != ctx.block_indentation:
+ ctx.stack.append(ctx.block_state)
+ else:
+ ctx.block_state = None
+ ctx.block_indentation = None
+ ctx.stack.append('content')
+
+
+def _starts_block(token, state):
+ def callback(lexer, match, ctx):
+ yield match.start(), token, match.group(0)
+
+ if hasattr(ctx, 'last_indentation'):
+ ctx.block_indentation = ctx.last_indentation
+ else:
+ ctx.block_indentation = ''
+
+ ctx.block_state = state
+ ctx.pos = match.end()
+
+ return callback
+
+
+class SassLexer(ExtendedRegexLexer):
+ """
+ For Sass stylesheets.
+
+ .. versionadded:: 1.3
+ """
+
+ name = 'Sass'
+ url = 'https://sass-lang.com/'
+ aliases = ['sass']
+ filenames = ['*.sass']
+ mimetypes = ['text/x-sass']
+
+ flags = re.IGNORECASE | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r'[ \t]*\n', Whitespace),
+ (r'[ \t]*', _indentation),
+ ],
+
+ 'content': [
+ (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
+ 'root'),
+ (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
+ 'root'),
+ (r'@import', Keyword, 'import'),
+ (r'@for', Keyword, 'for'),
+ (r'@(debug|warn|if|while)', Keyword, 'value'),
+ (r'(@mixin)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Function), 'value'),
+ (r'(@include)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Decorator), 'value'),
+ (r'@extend', Keyword, 'selector'),
+ (r'@[\w-]+', Keyword, 'selector'),
+ (r'=[\w-]+', Name.Function, 'value'),
+ (r'\+[\w-]+', Name.Decorator, 'value'),
+ (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
+ bygroups(Name.Variable, Operator), 'value'),
+ (r':', Name.Attribute, 'old-style-attr'),
+ (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
+ default('selector'),
+ ],
+
+ 'single-comment': [
+ (r'.+', Comment.Single),
+ (r'\n', Whitespace, 'root'),
+ ],
+
+ 'multi-comment': [
+ (r'.+', Comment.Multiline),
+ (r'\n', Whitespace, 'root'),
+ ],
+
+ 'import': [
+ (r'[ \t]+', Whitespace),
+ (r'\S+', String),
+ (r'\n', Whitespace, 'root'),
+ ],
+
+ 'old-style-attr': [
+ (r'[^\s:="\[]+', Name.Attribute),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'([ \t]*)(=)', bygroups(Whitespace, Operator), 'value'),
+ default('value'),
+ ],
+
+ 'new-style-attr': [
+ (r'[^\s:="\[]+', Name.Attribute),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'([ \t]*)([=:])', bygroups(Whitespace, Operator), 'value'),
+ ],
+
+ 'inline-comment': [
+ (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r"\*/", Comment, '#pop'),
+ ],
+ }
+ for group, common in common_sass_tokens.items():
+ tokens[group] = copy.copy(common)
+ tokens['value'].append((r'\n', Whitespace, 'root'))
+ tokens['selector'].append((r'\n', Whitespace, 'root'))
+
+
+class ScssLexer(RegexLexer):
+ """
+ For SCSS stylesheets.
+ """
+
+ name = 'SCSS'
+ url = 'https://sass-lang.com/'
+ aliases = ['scss']
+ filenames = ['*.scss']
+ mimetypes = ['text/x-scss']
+
+ flags = re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'@import', Keyword, 'value'),
+ (r'@for', Keyword, 'for'),
+ (r'@(debug|warn|if|while)', Keyword, 'value'),
+ (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
+ (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
+ (r'@extend', Keyword, 'selector'),
+ (r'(@media)(\s+)', bygroups(Keyword, Whitespace), 'value'),
+ (r'@[\w-]+', Keyword, 'selector'),
+ (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
+ # TODO: broken, and prone to infinite loops.
+ # (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
+ # (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
+ default('selector'),
+ ],
+
+ 'attr': [
+ (r'[^\s:="\[]+', Name.Attribute),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'[ \t]*:', Operator, 'value'),
+ default('#pop'),
+ ],
+
+ 'inline-comment': [
+ (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r"\*/", Comment, '#pop'),
+ ],
+ }
+ for group, common in common_sass_tokens.items():
+ tokens[group] = copy.copy(common)
+ tokens['value'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
+ tokens['selector'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
+
+
+class LessCssLexer(CssLexer):
+ """
+ For LESS stylesheets.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'LessCss'
+ url = 'http://lesscss.org/'
+ aliases = ['less']
+ filenames = ['*.less']
+ mimetypes = ['text/x-less-css']
+
+ tokens = {
+ 'root': [
+ (r'@\w+', Name.Variable),
+ inherit,
+ ],
+ 'content': [
+ (r'\{', Punctuation, '#push'),
+ (r'//.*\n', Comment.Single),
+ inherit,
+ ],
+ }
diff --git a/pygments/lexers/d.py b/pygments/lexers/d.py
new file mode 100644
index 0000000..10fa3b5
--- /dev/null
+++ b/pygments/lexers/d.py
@@ -0,0 +1,258 @@
+"""
+ pygments.lexers.d
+ ~~~~~~~~~~~~~~~~~
+
+ Lexers for D languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words, bygroups
+from pygments.token import Comment, Keyword, Name, String, Number, \
+ Punctuation, Whitespace
+
+__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer']
+
+
+class DLexer(RegexLexer):
+ """
+ For D source.
+
+ .. versionadded:: 1.2
+ """
+ name = 'D'
+ url = 'https://dlang.org/'
+ filenames = ['*.d', '*.di']
+ aliases = ['d']
+ mimetypes = ['text/x-dsrc']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ # (r'\\\n', Text), # line continuations
+ # Comments
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+ (r'/\+', Comment.Multiline, 'nested_comment'),
+ # Keywords
+ (words((
+ 'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body',
+ 'break', 'case', 'cast', 'catch', 'class', 'const', 'continue',
+ 'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else',
+ 'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse',
+ 'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import',
+ 'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin',
+ 'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma',
+ 'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope',
+ 'shared', 'static', 'struct', 'super', 'switch', 'synchronized',
+ 'template', 'this', 'throw', 'try', 'typeid', 'typeof',
+ 'union', 'unittest', 'version', 'volatile', 'while', 'with',
+ '__gshared', '__traits', '__vector', '__parameters'),
+ suffix=r'\b'),
+ Keyword),
+ (words((
+ # Removed in 2.072
+ 'typedef', ),
+ suffix=r'\b'),
+ Keyword.Removed),
+ (words((
+ 'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal',
+ 'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal',
+ 'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong',
+ 'ushort', 'void', 'wchar'), suffix=r'\b'),
+ Keyword.Type),
+ (r'(false|true|null)\b', Keyword.Constant),
+ (words((
+ '__FILE__', '__FILE_FULL_PATH__', '__MODULE__', '__LINE__', '__FUNCTION__',
+ '__PRETTY_FUNCTION__', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__',
+ '__VENDOR__', '__VERSION__'), suffix=r'\b'),
+ Keyword.Pseudo),
+ (r'macro\b', Keyword.Reserved),
+ (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
+ # FloatLiteral
+ # -- HexFloat
+ (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
+ r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
+ # -- DecimalFloat
+ (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
+ r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
+ (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
+ # IntegerLiteral
+ # -- Binary
+ (r'0[Bb][01_]+', Number.Bin),
+ # -- Octal
+ (r'0[0-7_]+', Number.Oct),
+ # -- Hexadecimal
+ (r'0[xX][0-9a-fA-F_]+', Number.Hex),
+ # -- Decimal
+ (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
+ # CharacterLiteral
+ (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
+ r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
+ String.Char),
+ # StringLiteral
+ # -- WysiwygString
+ (r'r"[^"]*"[cwd]?', String),
+ # -- AlternateWysiwygString
+ (r'`[^`]*`[cwd]?', String),
+ # -- DoubleQuotedString
+ (r'"(\\\\|\\[^\\]|[^"\\])*"[cwd]?', String),
+ # -- EscapeSequence
+ (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
+ r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
+ String),
+ # -- HexString
+ (r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
+ # -- DelimitedString
+ (r'q"\[', String, 'delimited_bracket'),
+ (r'q"\(', String, 'delimited_parenthesis'),
+ (r'q"<', String, 'delimited_angle'),
+ (r'q"\{', String, 'delimited_curly'),
+ (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
+ (r'q"(.).*?\1"', String),
+ # -- TokenString
+ (r'q\{', String, 'token_string'),
+ # Attributes
+ (r'@([a-zA-Z_]\w*)?', Name.Decorator),
+ # Tokens
+ (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
+ r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
+ r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation),
+ # Identifier
+ (r'[a-zA-Z_]\w*', Name),
+ # Line
+ (r'(#line)(\s)(.*)(\n)', bygroups(Comment.Special, Whitespace,
+ Comment.Special, Whitespace)),
+ ],
+ 'nested_comment': [
+ (r'[^+/]+', Comment.Multiline),
+ (r'/\+', Comment.Multiline, '#push'),
+ (r'\+/', Comment.Multiline, '#pop'),
+ (r'[+/]', Comment.Multiline),
+ ],
+ 'token_string': [
+ (r'\{', Punctuation, 'token_string_nest'),
+ (r'\}', String, '#pop'),
+ include('root'),
+ ],
+ 'token_string_nest': [
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ include('root'),
+ ],
+ 'delimited_bracket': [
+ (r'[^\[\]]+', String),
+ (r'\[', String, 'delimited_inside_bracket'),
+ (r'\]"', String, '#pop'),
+ ],
+ 'delimited_inside_bracket': [
+ (r'[^\[\]]+', String),
+ (r'\[', String, '#push'),
+ (r'\]', String, '#pop'),
+ ],
+ 'delimited_parenthesis': [
+ (r'[^()]+', String),
+ (r'\(', String, 'delimited_inside_parenthesis'),
+ (r'\)"', String, '#pop'),
+ ],
+ 'delimited_inside_parenthesis': [
+ (r'[^()]+', String),
+ (r'\(', String, '#push'),
+ (r'\)', String, '#pop'),
+ ],
+ 'delimited_angle': [
+ (r'[^<>]+', String),
+ (r'<', String, 'delimited_inside_angle'),
+ (r'>"', String, '#pop'),
+ ],
+ 'delimited_inside_angle': [
+ (r'[^<>]+', String),
+ (r'<', String, '#push'),
+ (r'>', String, '#pop'),
+ ],
+ 'delimited_curly': [
+ (r'[^{}]+', String),
+ (r'\{', String, 'delimited_inside_curly'),
+ (r'\}"', String, '#pop'),
+ ],
+ 'delimited_inside_curly': [
+ (r'[^{}]+', String),
+ (r'\{', String, '#push'),
+ (r'\}', String, '#pop'),
+ ],
+ }
+
+
+class CrocLexer(RegexLexer):
+ """
+ For Croc source.
+ """
+ name = 'Croc'
+ url = 'http://jfbillingsley.com/croc'
+ filenames = ['*.croc']
+ aliases = ['croc']
+ mimetypes = ['text/x-crocsrc']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ # Comments
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/\*', Comment.Multiline, 'nestedcomment'),
+ # Keywords
+ (words((
+ 'as', 'assert', 'break', 'case', 'catch', 'class', 'continue',
+ 'default', 'do', 'else', 'finally', 'for', 'foreach', 'function',
+ 'global', 'namespace', 'if', 'import', 'in', 'is', 'local',
+ 'module', 'return', 'scope', 'super', 'switch', 'this', 'throw',
+ 'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'),
+ Keyword),
+ (r'(false|true|null)\b', Keyword.Constant),
+ # FloatLiteral
+ (r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?',
+ Number.Float),
+ # IntegerLiteral
+ # -- Binary
+ (r'0[bB][01][01_]*', Number.Bin),
+ # -- Hexadecimal
+ (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
+ # -- Decimal
+ (r'([0-9][0-9_]*)(?![.eE])', Number.Integer),
+ # CharacterLiteral
+ (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
+ r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
+ String.Char),
+ # StringLiteral
+ # -- WysiwygString
+ (r'@"(""|[^"])*"', String),
+ (r'@`(``|[^`])*`', String),
+ (r"@'(''|[^'])*'", String),
+ # -- DoubleQuotedString
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+ # Tokens
+ (r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
+ r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
+ r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation),
+ # Identifier
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'nestedcomment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ }
+
+
+class MiniDLexer(CrocLexer):
+ """
+ For MiniD source. MiniD is now known as Croc.
+ """
+ name = 'MiniD'
+ filenames = [] # don't lex .md as MiniD, reserve for Markdown
+ aliases = ['minid']
+ mimetypes = ['text/x-minidsrc']
diff --git a/pygments/lexers/dalvik.py b/pygments/lexers/dalvik.py
new file mode 100644
index 0000000..4380f0e
--- /dev/null
+++ b/pygments/lexers/dalvik.py
@@ -0,0 +1,127 @@
+"""
+ pygments.lexers.dalvik
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Pygments lexers for Dalvik VM-related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Keyword, Text, Comment, Name, String, Number, \
+ Punctuation, Whitespace
+
+__all__ = ['SmaliLexer']
+
+
+class SmaliLexer(RegexLexer):
+ """
+ For Smali (Android/Dalvik) assembly
+ code.
+
+ .. versionadded:: 1.6
+ """
+ name = 'Smali'
+ url = 'http://code.google.com/p/smali/'
+ aliases = ['smali']
+ filenames = ['*.smali']
+ mimetypes = ['text/smali']
+
+ tokens = {
+ 'root': [
+ include('comment'),
+ include('label'),
+ include('field'),
+ include('method'),
+ include('class'),
+ include('directive'),
+ include('access-modifier'),
+ include('instruction'),
+ include('literal'),
+ include('punctuation'),
+ include('type'),
+ include('whitespace')
+ ],
+ 'directive': [
+ (r'^([ \t]*)(\.(?:class|super|implements|field|subannotation|annotation|'
+ r'enum|method|registers|locals|array-data|packed-switch|'
+ r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
+ r'epilogue|source))', bygroups(Whitespace, Keyword)),
+ (r'^([ \t]*)(\.end)( )(field|subannotation|annotation|method|array-data|'
+ 'packed-switch|sparse-switch|parameter|local)',
+ bygroups(Whitespace, Keyword, Whitespace, Keyword)),
+ (r'^([ \t]*)(\.restart)( )(local)',
+ bygroups(Whitespace, Keyword, Whitespace, Keyword)),
+ ],
+ 'access-modifier': [
+ (r'(public|private|protected|static|final|synchronized|bridge|'
+ r'varargs|native|abstract|strictfp|synthetic|constructor|'
+ r'declared-synchronized|interface|enum|annotation|volatile|'
+ r'transient)', Keyword),
+ ],
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ ],
+ 'instruction': [
+ (r'\b[vp]\d+\b', Name.Builtin), # registers
+ (r'(\b[a-z][A-Za-z0-9/-]+)(\s+)', bygroups(Text, Whitespace)), # instructions
+ ],
+ 'literal': [
+ (r'".*"', String),
+ (r'0x[0-9A-Fa-f]+t?', Number.Hex),
+ (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'[0-9]+L?', Number.Integer),
+ ],
+ 'field': [
+ (r'(\$?\b)([\w$]*)(:)',
+ bygroups(Punctuation, Name.Variable, Punctuation)),
+ ],
+ 'method': [
+ (r'<(?:cl)?init>', Name.Function), # constructor
+ (r'(\$?\b)([\w$]*)(\()',
+ bygroups(Punctuation, Name.Function, Punctuation)),
+ ],
+ 'label': [
+ (r':\w+', Name.Label),
+ ],
+ 'class': [
+ # class names in the form Lcom/namespace/ClassName;
+ # I only want to color the ClassName part, so the namespace part is
+ # treated as 'Text'
+ (r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
+ bygroups(Keyword.Type, Text, Name.Class, Text)),
+ ],
+ 'punctuation': [
+ (r'->', Punctuation),
+ (r'[{},():=.-]', Punctuation),
+ ],
+ 'type': [
+ (r'[ZBSCIJFDV\[]+', Keyword.Type),
+ ],
+ 'comment': [
+ (r'#.*?\n', Comment),
+ ],
+ }
+
+ def analyse_text(text):
+ score = 0
+ if re.search(r'^\s*\.class\s', text, re.MULTILINE):
+ score += 0.5
+ if re.search(r'\b((check-cast|instance-of|throw-verification-error'
+ r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
+ r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
+ r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
+ score += 0.3
+ if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
+ r'\b(array-data|class-change-error|declared-synchronized|'
+ r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
+ r'illegal-class-access|illegal-field-access|'
+ r'illegal-method-access|instantiation-error|no-error|'
+ r'no-such-class|no-such-field|no-such-method|'
+ r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
+ score += 0.6
+ return score
diff --git a/pygments/lexers/data.py b/pygments/lexers/data.py
new file mode 100644
index 0000000..d188730
--- /dev/null
+++ b/pygments/lexers/data.py
@@ -0,0 +1,767 @@
+"""
+ pygments.lexers.data
+ ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for data file formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import Lexer, ExtendedRegexLexer, LexerContext, \
+ include, bygroups
+from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
+ Punctuation, String, Whitespace
+
+__all__ = ['YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer']
+
+
+class YamlLexerContext(LexerContext):
+ """Indentation context for the YAML lexer."""
+
+ def __init__(self, *args, **kwds):
+ super().__init__(*args, **kwds)
+ self.indent_stack = []
+ self.indent = -1
+ self.next_indent = 0
+ self.block_scalar_indent = None
+
+
+class YamlLexer(ExtendedRegexLexer):
+ """
+ Lexer for YAML, a human-friendly data serialization
+ language.
+
+ .. versionadded:: 0.11
+ """
+
+ name = 'YAML'
+ url = 'http://yaml.org/'
+ aliases = ['yaml']
+ filenames = ['*.yaml', '*.yml']
+ mimetypes = ['text/x-yaml']
+
+ def something(token_class):
+ """Do not produce empty tokens."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if not text:
+ return
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ def reset_indent(token_class):
+ """Reset the indentation levels."""
+ def callback(lexer, match, context):
+ text = match.group()
+ context.indent_stack = []
+ context.indent = -1
+ context.next_indent = 0
+ context.block_scalar_indent = None
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ def save_indent(token_class, start=False):
+ """Save a possible indentation level."""
+ def callback(lexer, match, context):
+ text = match.group()
+ extra = ''
+ if start:
+ context.next_indent = len(text)
+ if context.next_indent < context.indent:
+ while context.next_indent < context.indent:
+ context.indent = context.indent_stack.pop()
+ if context.next_indent > context.indent:
+ extra = text[context.indent:]
+ text = text[:context.indent]
+ else:
+ context.next_indent += len(text)
+ if text:
+ yield match.start(), token_class, text
+ if extra:
+ yield match.start()+len(text), token_class.Error, extra
+ context.pos = match.end()
+ return callback
+
+ def set_indent(token_class, implicit=False):
+ """Set the previously saved indentation level."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if context.indent < context.next_indent:
+ context.indent_stack.append(context.indent)
+ context.indent = context.next_indent
+ if not implicit:
+ context.next_indent += len(text)
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ def set_block_scalar_indent(token_class):
+ """Set an explicit indentation level for a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ context.block_scalar_indent = None
+ if not text:
+ return
+ increment = match.group(1)
+ if increment:
+ current_indent = max(context.indent, 0)
+ increment = int(increment)
+ context.block_scalar_indent = current_indent + increment
+ if text:
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ def parse_block_scalar_empty_line(indent_token_class, content_token_class):
+ """Process an empty line in a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if (context.block_scalar_indent is None or
+ len(text) <= context.block_scalar_indent):
+ if text:
+ yield match.start(), indent_token_class, text
+ else:
+ indentation = text[:context.block_scalar_indent]
+ content = text[context.block_scalar_indent:]
+ yield match.start(), indent_token_class, indentation
+ yield (match.start()+context.block_scalar_indent,
+ content_token_class, content)
+ context.pos = match.end()
+ return callback
+
+ def parse_block_scalar_indent(token_class):
+ """Process indentation spaces in a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if context.block_scalar_indent is None:
+ if len(text) <= max(context.indent, 0):
+ context.stack.pop()
+ context.stack.pop()
+ return
+ context.block_scalar_indent = len(text)
+ else:
+ if len(text) < context.block_scalar_indent:
+ context.stack.pop()
+ context.stack.pop()
+ return
+ if text:
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ def parse_plain_scalar_indent(token_class):
+ """Process indentation spaces in a plain scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if len(text) <= context.indent:
+ context.stack.pop()
+ context.stack.pop()
+ return
+ if text:
+ yield match.start(), token_class, text
+ context.pos = match.end()
+ return callback
+
+ tokens = {
+ # the root rules
+ 'root': [
+ # ignored whitespaces
+ (r'[ ]+(?=#|$)', Whitespace),
+ # line breaks
+ (r'\n+', Whitespace),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # the '%YAML' directive
+ (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
+ # the %TAG directive
+ (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
+ # document start and document end indicators
+ (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
+ 'block-line'),
+ # indentation spaces
+ (r'[ ]*(?!\s|$)', save_indent(Whitespace, start=True),
+ ('block-line', 'indentation')),
+ ],
+
+ # trailing whitespaces after directives or a block scalar indicator
+ 'ignored-line': [
+ # ignored whitespaces
+ (r'[ ]+(?=#|$)', Whitespace),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # line break
+ (r'\n', Whitespace, '#pop:2'),
+ ],
+
+ # the %YAML directive
+ 'yaml-directive': [
+ # the version number
+ (r'([ ]+)([0-9]+\.[0-9]+)',
+ bygroups(Whitespace, Number), 'ignored-line'),
+ ],
+
+ # the %TAG directive
+ 'tag-directive': [
+ # a tag handle and the corresponding prefix
+ (r'([ ]+)(!|![\w-]*!)'
+ r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
+ bygroups(Whitespace, Keyword.Type, Whitespace, Keyword.Type),
+ 'ignored-line'),
+ ],
+
+ # block scalar indicators and indentation spaces
+ 'indentation': [
+ # trailing whitespaces are ignored
+ (r'[ ]*$', something(Whitespace), '#pop:2'),
+ # whitespaces preceding block collection indicators
+ (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Whitespace)),
+ # block collection indicators
+ (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
+            # the beginning of a block line
+ (r'[ ]*', save_indent(Whitespace), '#pop'),
+ ],
+
+ # an indented line in the block context
+ 'block-line': [
+ # the line end
+ (r'[ ]*(?=#|$)', something(Whitespace), '#pop'),
+ # whitespaces separating tokens
+ (r'[ ]+', Whitespace),
+ # key with colon
+ (r'''([^#,?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
+ bygroups(Name.Tag, set_indent(Punctuation, implicit=True))),
+ # tags, anchors and aliases,
+ include('descriptors'),
+ # block collections and scalars
+ include('block-nodes'),
+ # flow collections and quoted scalars
+ include('flow-nodes'),
+ # a plain scalar
+ (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
+ something(Name.Variable),
+ 'plain-scalar-in-block-context'),
+ ],
+
+ # tags, anchors, aliases
+ 'descriptors': [
+ # a full-form tag
+ (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
+ # a tag in the form '!', '!suffix' or '!handle!suffix'
+ (r'!(?:[\w-]+!)?'
+ r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]*', Keyword.Type),
+ # an anchor
+ (r'&[\w-]+', Name.Label),
+ # an alias
+ (r'\*[\w-]+', Name.Variable),
+ ],
+
+ # block collections and scalars
+ 'block-nodes': [
+ # implicit key
+ (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
+ # literal and folded scalars
+ (r'[|>]', Punctuation.Indicator,
+ ('block-scalar-content', 'block-scalar-header')),
+ ],
+
+ # flow collections and quoted scalars
+ 'flow-nodes': [
+ # a flow sequence
+ (r'\[', Punctuation.Indicator, 'flow-sequence'),
+ # a flow mapping
+ (r'\{', Punctuation.Indicator, 'flow-mapping'),
+ # a single-quoted scalar
+ (r'\'', String, 'single-quoted-scalar'),
+ # a double-quoted scalar
+ (r'\"', String, 'double-quoted-scalar'),
+ ],
+
+ # the content of a flow collection
+ 'flow-collection': [
+ # whitespaces
+ (r'[ ]+', Whitespace),
+ # line breaks
+ (r'\n+', Whitespace),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # simple indicators
+ (r'[?:,]', Punctuation.Indicator),
+ # tags, anchors and aliases
+ include('descriptors'),
+ # nested collections and quoted scalars
+ include('flow-nodes'),
+ # a plain scalar
+ (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
+ something(Name.Variable),
+ 'plain-scalar-in-flow-context'),
+ ],
+
+ # a flow sequence indicated by '[' and ']'
+ 'flow-sequence': [
+ # include flow collection rules
+ include('flow-collection'),
+ # the closing indicator
+ (r'\]', Punctuation.Indicator, '#pop'),
+ ],
+
+ # a flow mapping indicated by '{' and '}'
+ 'flow-mapping': [
+ # key with colon
+ (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
+ bygroups(Name.Tag, Punctuation)),
+ # include flow collection rules
+ include('flow-collection'),
+ # the closing indicator
+ (r'\}', Punctuation.Indicator, '#pop'),
+ ],
+
+ # block scalar lines
+ 'block-scalar-content': [
+ # line break
+ (r'\n', Whitespace),
+ # empty line
+ (r'^[ ]+$',
+ parse_block_scalar_empty_line(Whitespace, Name.Constant)),
+ # indentation spaces (we may leave the state here)
+ (r'^[ ]*', parse_block_scalar_indent(Whitespace)),
+ # line content
+ (r'[\S\t ]+', Name.Constant),
+ ],
+
+        # the header of a literal or folded scalar (indicators only)
+ 'block-scalar-header': [
+ # indentation indicator followed by chomping flag
+ (r'([1-9])?[+-]?(?=[ ]|$)',
+ set_block_scalar_indent(Punctuation.Indicator),
+ 'ignored-line'),
+ # chomping flag followed by indentation indicator
+ (r'[+-]?([1-9])?(?=[ ]|$)',
+ set_block_scalar_indent(Punctuation.Indicator),
+ 'ignored-line'),
+ ],
+
+ # ignored and regular whitespaces in quoted scalars
+ 'quoted-scalar-whitespaces': [
+ # leading and trailing whitespaces are ignored
+ (r'^[ ]+', Whitespace),
+ (r'[ ]+$', Whitespace),
+ # line breaks are ignored
+ (r'\n+', Whitespace),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Name.Variable),
+ ],
+
+ # single-quoted scalars
+ 'single-quoted-scalar': [
+ # include whitespace and line break rules
+ include('quoted-scalar-whitespaces'),
+ # escaping of the quote character
+ (r'\'\'', String.Escape),
+ # regular non-whitespace characters
+ (r'[^\s\']+', String),
+ # the closing quote
+ (r'\'', String, '#pop'),
+ ],
+
+ # double-quoted scalars
+ 'double-quoted-scalar': [
+ # include whitespace and line break rules
+ include('quoted-scalar-whitespaces'),
+ # escaping of special characters
+ (r'\\[0abt\tn\nvfre "\\N_LP]', String),
+ # escape codes
+ (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
+ String.Escape),
+ # regular non-whitespace characters
+ (r'[^\s"\\]+', String),
+ # the closing quote
+ (r'"', String, '#pop'),
+ ],
+
+ # the beginning of a new line while scanning a plain scalar
+ 'plain-scalar-in-block-context-new-line': [
+ # empty lines
+ (r'^[ ]+$', Whitespace),
+ # line breaks
+ (r'\n+', Whitespace),
+ # document start and document end indicators
+ (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
+ # indentation spaces (we may leave the block line state here)
+ (r'^[ ]*', parse_plain_scalar_indent(Whitespace), '#pop'),
+ ],
+
+ # a plain scalar in the block context
+ 'plain-scalar-in-block-context': [
+ # the scalar ends with the ':' indicator
+ (r'[ ]*(?=:[ ]|:$)', something(Whitespace), '#pop'),
+ # the scalar ends with whitespaces followed by a comment
+ (r'[ ]+(?=#)', Whitespace, '#pop'),
+ # trailing whitespaces are ignored
+ (r'[ ]+$', Whitespace),
+ # line breaks are ignored
+ (r'\n+', Whitespace, 'plain-scalar-in-block-context-new-line'),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Literal.Scalar.Plain),
+ # regular non-whitespace characters
+ (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
+ ],
+
+        # a plain scalar in the flow context
+ 'plain-scalar-in-flow-context': [
+ # the scalar ends with an indicator character
+ (r'[ ]*(?=[,:?\[\]{}])', something(Whitespace), '#pop'),
+ # the scalar ends with a comment
+ (r'[ ]+(?=#)', Whitespace, '#pop'),
+ # leading and trailing whitespaces are ignored
+ (r'^[ ]+', Whitespace),
+ (r'[ ]+$', Whitespace),
+ # line breaks are ignored
+ (r'\n+', Whitespace),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Name.Variable),
+ # regular non-whitespace characters
+ (r'[^\s,:?\[\]{}]+', Name.Variable),
+ ],
+
+ }
+
+ def get_tokens_unprocessed(self, text=None, context=None):
+ if context is None:
+ context = YamlLexerContext(text, 0)
+ return super().get_tokens_unprocessed(text, context)
+
+
+class JsonLexer(Lexer):
+ """
+ For JSON data structures.
+
+    JavaScript-style comments are supported (like ``/* */`` and ``//``),
+ though comments are not part of the JSON specification.
+ This allows users to highlight JSON as it is used in the wild.
+
+ No validation is performed on the input JSON document.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'JSON'
+ url = 'https://www.json.org'
+ aliases = ['json', 'json-object']
+ filenames = ['*.json', 'Pipfile.lock']
+ mimetypes = ['application/json', 'application/json-object']
+
+ # No validation of integers, floats, or constants is done.
+ # As long as the characters are members of the following
+ # sets, the token will be considered valid. For example,
+ #
+ # "--1--" is parsed as an integer
+ # "1...eee" is parsed as a float
+ # "trustful" is parsed as a constant
+ #
+ integers = set('-0123456789')
+ floats = set('.eE+')
+ constants = set('truefalsenull') # true|false|null
+ hexadecimals = set('0123456789abcdefABCDEF')
+ punctuations = set('{}[],')
+ whitespaces = {'\u0020', '\u000a', '\u000d', '\u0009'}
+
+ def get_tokens_unprocessed(self, text):
+ """Parse JSON data."""
+
+ in_string = False
+ in_escape = False
+ in_unicode_escape = 0
+ in_whitespace = False
+ in_constant = False
+ in_number = False
+ in_float = False
+ in_punctuation = False
+ in_comment_single = False
+ in_comment_multiline = False
+ expecting_second_comment_opener = False # // or /*
+ expecting_second_comment_closer = False # */
+
+ start = 0
+
+ # The queue is used to store data that may need to be tokenized
+ # differently based on what follows. In particular, JSON object
+ # keys are tokenized differently than string values, but cannot
+ # be distinguished until punctuation is encountered outside the
+ # string.
+ #
+ # A ":" character after the string indicates that the string is
+ # an object key; any other character indicates the string is a
+ # regular string value.
+ #
+ # The queue holds tuples that contain the following data:
+ #
+ # (start_index, token_type, text)
+ #
+ # By default the token type of text in double quotes is
+ # String.Double. The token type will be replaced if a colon
+ # is encountered after the string closes.
+ #
+ queue = []
+
+ for stop, character in enumerate(text):
+ if in_string:
+ if in_unicode_escape:
+ if character in self.hexadecimals:
+ in_unicode_escape -= 1
+ if not in_unicode_escape:
+ in_escape = False
+ else:
+ in_unicode_escape = 0
+ in_escape = False
+
+ elif in_escape:
+ if character == 'u':
+ in_unicode_escape = 4
+ else:
+ in_escape = False
+
+ elif character == '\\':
+ in_escape = True
+
+ elif character == '"':
+ queue.append((start, String.Double, text[start:stop + 1]))
+ in_string = False
+ in_escape = False
+ in_unicode_escape = 0
+
+ continue
+
+ elif in_whitespace:
+ if character in self.whitespaces:
+ continue
+
+ if queue:
+ queue.append((start, Whitespace, text[start:stop]))
+ else:
+ yield start, Whitespace, text[start:stop]
+ in_whitespace = False
+ # Fall through so the new character can be evaluated.
+
+ elif in_constant:
+ if character in self.constants:
+ continue
+
+ yield start, Keyword.Constant, text[start:stop]
+ in_constant = False
+ # Fall through so the new character can be evaluated.
+
+ elif in_number:
+ if character in self.integers:
+ continue
+ elif character in self.floats:
+ in_float = True
+ continue
+
+ if in_float:
+ yield start, Number.Float, text[start:stop]
+ else:
+ yield start, Number.Integer, text[start:stop]
+ in_number = False
+ in_float = False
+ # Fall through so the new character can be evaluated.
+
+ elif in_punctuation:
+ if character in self.punctuations:
+ continue
+
+ yield start, Punctuation, text[start:stop]
+ in_punctuation = False
+ # Fall through so the new character can be evaluated.
+
+ elif in_comment_single:
+ if character != '\n':
+ continue
+
+ if queue:
+ queue.append((start, Comment.Single, text[start:stop]))
+ else:
+ yield start, Comment.Single, text[start:stop]
+
+ in_comment_single = False
+ # Fall through so the new character can be evaluated.
+
+ elif in_comment_multiline:
+ if character == '*':
+ expecting_second_comment_closer = True
+ elif expecting_second_comment_closer:
+ expecting_second_comment_closer = False
+ if character == '/':
+ if queue:
+ queue.append((start, Comment.Multiline, text[start:stop + 1]))
+ else:
+ yield start, Comment.Multiline, text[start:stop + 1]
+
+ in_comment_multiline = False
+
+ continue
+
+ elif expecting_second_comment_opener:
+ expecting_second_comment_opener = False
+ if character == '/':
+ in_comment_single = True
+ continue
+ elif character == '*':
+ in_comment_multiline = True
+ continue
+
+ # Exhaust the queue. Accept the existing token types.
+ yield from queue
+ queue.clear()
+
+ yield start, Error, text[start:stop]
+ # Fall through so the new character can be evaluated.
+
+ start = stop
+
+ if character == '"':
+ in_string = True
+
+ elif character in self.whitespaces:
+ in_whitespace = True
+
+ elif character in {'f', 'n', 't'}: # The first letters of true|false|null
+ # Exhaust the queue. Accept the existing token types.
+ yield from queue
+ queue.clear()
+
+ in_constant = True
+
+ elif character in self.integers:
+ # Exhaust the queue. Accept the existing token types.
+ yield from queue
+ queue.clear()
+
+ in_number = True
+
+ elif character == ':':
+ # Yield from the queue. Replace string token types.
+ for _start, _token, _text in queue:
+ # There can be only three types of tokens before a ':':
+ # Whitespace, Comment, or a quoted string.
+ #
+ # If it's a quoted string we emit Name.Tag.
+ # Otherwise, we yield the original token.
+ #
+ # In all other cases this would be invalid JSON,
+ # but this is not a validating JSON lexer, so it's OK.
+ if _token is String.Double:
+ yield _start, Name.Tag, _text
+ else:
+ yield _start, _token, _text
+ queue.clear()
+
+ in_punctuation = True
+
+ elif character in self.punctuations:
+ # Exhaust the queue. Accept the existing token types.
+ yield from queue
+ queue.clear()
+
+ in_punctuation = True
+
+ elif character == '/':
+ # This is the beginning of a comment.
+ expecting_second_comment_opener = True
+
+ else:
+ # Exhaust the queue. Accept the existing token types.
+ yield from queue
+ queue.clear()
+
+ yield start, Error, character
+
+ # Yield any remaining text.
+ yield from queue
+ if in_string:
+ yield start, Error, text[start:]
+ elif in_float:
+ yield start, Number.Float, text[start:]
+ elif in_number:
+ yield start, Number.Integer, text[start:]
+ elif in_constant:
+ yield start, Keyword.Constant, text[start:]
+ elif in_whitespace:
+ yield start, Whitespace, text[start:]
+ elif in_punctuation:
+ yield start, Punctuation, text[start:]
+ elif in_comment_single:
+ yield start, Comment.Single, text[start:]
+ elif in_comment_multiline:
+ yield start, Error, text[start:]
+ elif expecting_second_comment_opener:
+ yield start, Error, text[start:]
+
+
+class JsonBareObjectLexer(JsonLexer):
+ """
+ For JSON data structures (with missing object curly braces).
+
+ .. versionadded:: 2.2
+
+ .. deprecated:: 2.8.0
+
+ Behaves the same as `JsonLexer` now.
+ """
+
+ name = 'JSONBareObject'
+ aliases = []
+ filenames = []
+ mimetypes = []
+
+
+class JsonLdLexer(JsonLexer):
+ """
+ For JSON-LD linked data.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'JSON-LD'
+ url = 'https://json-ld.org/'
+ aliases = ['jsonld', 'json-ld']
+ filenames = ['*.jsonld']
+ mimetypes = ['application/ld+json']
+
+ json_ld_keywords = {
+ '"@%s"' % keyword
+ for keyword in (
+ 'base',
+ 'container',
+ 'context',
+ 'direction',
+ 'graph',
+ 'id',
+ 'import',
+ 'included',
+ 'index',
+ 'json',
+ 'language',
+ 'list',
+ 'nest',
+ 'none',
+ 'prefix',
+ 'propagate',
+ 'protected',
+ 'reverse',
+ 'set',
+ 'type',
+ 'value',
+ 'version',
+ 'vocab',
+ )
+ }
+
+ def get_tokens_unprocessed(self, text):
+ for start, token, value in super().get_tokens_unprocessed(text):
+ if token is Name.Tag and value in self.json_ld_keywords:
+ yield start, Name.Decorator, value
+ else:
+ yield start, token, value
diff --git a/pygments/lexers/devicetree.py b/pygments/lexers/devicetree.py
new file mode 100644
index 0000000..15b72ae
--- /dev/null
+++ b/pygments/lexers/devicetree.py
@@ -0,0 +1,109 @@
+"""
+ pygments.lexers.devicetree
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Devicetree language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, default, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ['DevicetreeLexer']
+
+
+class DevicetreeLexer(RegexLexer):
+ """
+ Lexer for Devicetree files.
+
+ .. versionadded:: 2.7
+ """
+
+ name = 'Devicetree'
+ url = 'https://www.devicetree.org/'
+ aliases = ['devicetree', 'dts']
+ filenames = ['*.dts', '*.dtsi']
+ mimetypes = ['text/x-c']
+
+ #: optional Whitespace or /*...*/ style comment
+ _ws = r'\s*(?:/[*][^*/]*?[*]/\s*)*'
+
+ tokens = {
+ 'macro': [
+ # Include preprocessor directives (C style):
+ (r'(#include)(' + _ws + r')([^\n]+)',
+ bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
+ # Define preprocessor directives (C style):
+ (r'(#define)(' + _ws + r')([^\n]+)',
+ bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc)),
+ # devicetree style with file:
+ (r'(/[^*/{]+/)(' + _ws + r')("[^\n{]+")',
+ bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
+ # devicetree style with property:
+ (r'(/[^*/{]+/)(' + _ws + r')([^\n;{]*)([;]?)',
+ bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc, Punctuation)),
+ ],
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'\\\n', Text), # line continuation
+ (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
+ # Open until EOF, so no ending delimiter
+ (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
+ ],
+ 'statements': [
+ (r'(L?)(")', bygroups(String.Affix, String), 'string'),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ (r'([^\s{}/*]*)(\s*)(:)', bygroups(Name.Label, Text, Punctuation), '#pop'),
+ (words(('compatible', 'model', 'phandle', 'status', '#address-cells',
+ '#size-cells', 'reg', 'virtual-reg', 'ranges', 'dma-ranges',
+ 'device_type', 'name'), suffix=r'\b'), Keyword.Reserved),
+ (r'([~!%^&*+=|?:<>/#-])', Operator),
+ (r'[()\[\]{},.]', Punctuation),
+ (r'[a-zA-Z_][\w-]*(?=(?:\s*,\s*[a-zA-Z_][\w-]*|(?:' + _ws + r'))*\s*[=;])',
+ Name),
+ (r'[a-zA-Z_]\w*', Name.Attribute),
+ ],
+ 'root': [
+ include('whitespace'),
+ include('macro'),
+
+ # Nodes
+ (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
+ bygroups(Name.Function, Operator, Number.Integer,
+ Comment.Multiline, Punctuation), 'node'),
+
+ default('statement'),
+ ],
+ 'statement': [
+ include('whitespace'),
+ include('statements'),
+ (';', Punctuation, '#pop'),
+ ],
+ 'node': [
+ include('whitespace'),
+ include('macro'),
+
+ (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
+ bygroups(Name.Function, Operator, Number.Integer,
+ Comment.Multiline, Punctuation), '#push'),
+
+ include('statements'),
+
+ (r'\};', Punctuation, '#pop'),
+ (';', Punctuation),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
+ r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'\\\n', String), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ }
diff --git a/pygments/lexers/diff.py b/pygments/lexers/diff.py
new file mode 100644
index 0000000..6a7ba2f
--- /dev/null
+++ b/pygments/lexers/diff.py
@@ -0,0 +1,165 @@
+"""
+ pygments.lexers.diff
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for diff/patch formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
+ Literal, Whitespace
+
+__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
+
+
+class DiffLexer(RegexLexer):
+ """
+ Lexer for unified or context-style diffs or patches.
+ """
+
+ name = 'Diff'
+ aliases = ['diff', 'udiff']
+ filenames = ['*.diff', '*.patch']
+ mimetypes = ['text/x-diff', 'text/x-patch']
+
+ tokens = {
+ 'root': [
+ (r'( )(.*)(\n)', bygroups(Whitespace, Text, Whitespace)),
+ (r'(\+.*)(\n)', bygroups(Generic.Inserted, Whitespace)),
+ (r'(-.*)(\n)', bygroups(Generic.Deleted, Whitespace)),
+ (r'(!.*)(\n)', bygroups(Generic.Strong, Whitespace)),
+ (r'(@.*)(\n)', bygroups(Generic.Subheading, Whitespace)),
+ (r'((?:[Ii]ndex|diff).*)(\n)', bygroups(Generic.Heading, Whitespace)),
+ (r'(=.*)(\n)', bygroups(Generic.Heading, Whitespace)),
+ (r'(.*)(\n)', Whitespace),
+ ]
+ }
+
+ def analyse_text(text):
+ if text[:7] == 'Index: ':
+ return True
+ if text[:5] == 'diff ':
+ return True
+ if text[:4] == '--- ':
+ return 0.9
+
+
+class DarcsPatchLexer(RegexLexer):
+ """
+ DarcsPatchLexer is a lexer for the various versions of the darcs patch
+ format. Examples of this format are derived by commands such as
+ ``darcs annotate --patch`` and ``darcs send``.
+
+ .. versionadded:: 0.10
+ """
+
+ name = 'Darcs Patch'
+ aliases = ['dpatch']
+ filenames = ['*.dpatch', '*.darcspatch']
+
+ DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
+ 'replace')
+
+ tokens = {
+ 'root': [
+ (r'<', Operator),
+ (r'>', Operator),
+ (r'\{', Operator),
+ (r'\}', Operator),
+ (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
+ bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
+ Literal.Date, Whitespace, Operator)),
+ (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
+ bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
+ Literal.Date, Whitespace), 'comment'),
+ (r'New patches:', Generic.Heading),
+ (r'Context:', Generic.Heading),
+ (r'Patch bundle hash:', Generic.Heading),
+ (r'(\s*)(%s)(.*)(\n)' % '|'.join(DPATCH_KEYWORDS),
+ bygroups(Whitespace, Keyword, Text, Whitespace)),
+ (r'\+', Generic.Inserted, "insert"),
+ (r'-', Generic.Deleted, "delete"),
+ (r'(.*)(\n)', bygroups(Text, Whitespace)),
+ ],
+ 'comment': [
+ (r'[^\]].*\n', Comment),
+ (r'\]', Operator, "#pop"),
+ ],
+ 'specialText': [ # darcs add [_CODE_] special operators for clarity
+ (r'\n', Whitespace, "#pop"), # line-based
+ (r'\[_[^_]*_]', Operator),
+ ],
+ 'insert': [
+ include('specialText'),
+ (r'\[', Generic.Inserted),
+ (r'[^\n\[]+', Generic.Inserted),
+ ],
+ 'delete': [
+ include('specialText'),
+ (r'\[', Generic.Deleted),
+ (r'[^\n\[]+', Generic.Deleted),
+ ],
+ }
+
+
+class WDiffLexer(RegexLexer):
+ """
+ A wdiff lexer.
+
+ Note that:
+
+ * It only works with normal output (without options like ``-l``).
+ * If the target files contain "[-", "-]", "{+", or "+}",
+      especially if they are unbalanced, the lexer will get confused.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'WDiff'
+ url = 'https://www.gnu.org/software/wdiff/'
+ aliases = ['wdiff']
+ filenames = ['*.wdiff']
+ mimetypes = []
+
+ flags = re.MULTILINE | re.DOTALL
+
+    # We can only assume that a "[-" appearing after "[-" but before "-]" is
+    # nested, for instance in wdiff-of-wdiff outputs. We have no way to tell
+    # whether such markers come from wdiff output or from the original text.
+
+ ins_op = r"\{\+"
+ ins_cl = r"\+\}"
+ del_op = r"\[\-"
+ del_cl = r"\-\]"
+ normal = r'[^{}[\]+-]+' # for performance
+ tokens = {
+ 'root': [
+ (ins_op, Generic.Inserted, 'inserted'),
+ (del_op, Generic.Deleted, 'deleted'),
+ (normal, Text),
+ (r'.', Text),
+ ],
+ 'inserted': [
+ (ins_op, Generic.Inserted, '#push'),
+ (del_op, Generic.Inserted, '#push'),
+ (del_cl, Generic.Inserted, '#pop'),
+
+ (ins_cl, Generic.Inserted, '#pop'),
+ (normal, Generic.Inserted),
+ (r'.', Generic.Inserted),
+ ],
+ 'deleted': [
+ (del_op, Generic.Deleted, '#push'),
+ (ins_op, Generic.Deleted, '#push'),
+ (ins_cl, Generic.Deleted, '#pop'),
+
+ (del_cl, Generic.Deleted, '#pop'),
+ (normal, Generic.Deleted),
+ (r'.', Generic.Deleted),
+ ],
+ }
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
new file mode 100644
index 0000000..0778ffe
--- /dev/null
+++ b/pygments/lexers/dotnet.py
@@ -0,0 +1,729 @@
+"""
+ pygments.lexers.dotnet
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for .net languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+
+from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
+ using, this, default, words
+from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
+ Name, String, Number, Literal, Other, Whitespace
+from pygments.util import get_choice_opt
+from pygments import unistring as uni
+
+from pygments.lexers.html import XmlLexer
+
+__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
+ 'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
+
+
class CSharpLexer(RegexLexer):
    """
    For C# source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 0.8
    """

    name = 'C#'
    url = 'https://docs.microsoft.com/en-us/dotnet/csharp/'
    aliases = ['csharp', 'c#', 'cs']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred

    flags = re.MULTILINE | re.DOTALL

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf

    # One identifier regex per unicode level; a complete token table is built
    # for each level in the class-body loop below, and __init__ selects the
    # requested variant at instantiation time.
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
                 '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                      'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    # signals the RegexLexer machinery that `tokens` is keyed by variant name
    token_variants = True

    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                              # method name
                 r'(\s*)(\()',                                      # signature start
                 bygroups(Whitespace, using(this), Name.Function, Whitespace,
                          Punctuation)),
                (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
                (r'[^\S\n]+', Whitespace),
                (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Whitespace),
                # multi-character operators first, longest variants first
                (words((
                    '>>>=', '>>=', '<<=', '<=', '>=', '+=', '-=', '*=', '/=',
                    '%=', '&=', '|=', '^=', '??=', '=>', '??', '?.', '!=', '==',
                    '&&', '||', '>>>', '>>', '<<', '++', '--', '+', '-', '*',
                    '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=',
                )), Operator),
                # NOTE(review): this rule appears fully shadowed by the words()
                # rule above (every prefix it can match is matched there first)
                # -- presumably kept for historical reasons; confirm before removing.
                (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
                (r'[()\[\];:,.]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),  # verbatim string, "" escapes a quote
                (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b(.*?)(\n)',
                 bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
                          Comment.Preproc, Whitespace)),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace,
                 Keyword)),
                (r'(abstract|as|async|await|base|break|by|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|let|lock|new|null|on|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|thenby|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop'),
                default('#pop'),
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
            ]
        }

    def __init__(self, **options):
        # Pick the token table for the requested unicode level and compile it
        # lazily, caching compiled tables on the class in _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
+
class NemerleLexer(RegexLexer):
    """
    For Nemerle source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 1.5
    """

    name = 'Nemerle'
    url = 'http://nemerle.org'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred

    flags = re.MULTILINE | re.DOTALL

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf

    # One identifier regex per unicode level; a complete token table is built
    # for each level in the loop below and selected in __init__.
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
                 '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                      'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    # signals the RegexLexer machinery that `tokens` is keyed by variant name
    token_variants = True

    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                              # method name
                 r'(\s*)(\()',                                      # signature start
                 bygroups(Whitespace, using(this), Name.Function, Whitespace,
                          Punctuation)),
                (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
                (r'[^\S\n]+', Whitespace),
                (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Whitespace),
                # interpolated ("splice") strings: $"..." and $<#...#>
                (r'(\$)(\s*)(")', bygroups(String, Whitespace, String),
                 'splice-string'),
                (r'(\$)(\s*)(<#)', bygroups(String, Whitespace, String),
                 'splice-string2'),
                (r'<#', String, 'recursive-string'),

                # quasiquotation brackets <[ ... ]>
                (r'(<\[)(\s*)(' + cs_ident + ':)?',
                 bygroups(Keyword, Whitespace, Keyword)),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),  # verbatim string, "" escapes a quote
                (r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b',
                 bygroups(Comment.Preproc, Whitespace, Comment.Preproc), 'preproc'),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)(\s*)(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Whitespace, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Whitespace), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'preproc': [
                (r'\w+', Comment.Preproc),
                (r'[ \t]+', Whitespace),
                (r'\n', Whitespace, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                # The dash must be escaped here: the original `?-\\` formed an
                # unintended character range U+003F-U+005C, which silently
                # matched '@', 'A'-'Z' and '[' as Punctuation too.
                (r'[~!%^&*+=|\[\]:;,.<>/?\-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        # Pick the token table for the requested unicode level and compile it
        # lazily, caching compiled tables on the class in _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        """Nemerle is quite similar to Python, but @if is relatively uncommon
        elsewhere."""
        result = 0

        if '@if' in text:
            result += 0.1

        return result
+
+
class BooLexer(RegexLexer):
    """
    For Boo source code.
    """

    name = 'Boo'
    url = 'https://github.com/boo-lang/boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),   # short /x/ form
            (r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),   # verbatim @/.../ form
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            (r'def(?=\s+\(.*?\))', Keyword),  # anonymous closure: def (...)
            (r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
            # builtins only when not preceded by a dot (i.e. not attribute access)
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9.]*(ms?|d|h|s)', Number),  # timespan literals
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'comment': [
            # nested /* ... */ comments
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ]
    }
+
+
class VbNetLexer(RegexLexer):
    """
    For Visual Basic.NET source code.
    Also LibreOffice Basic, OpenOffice Basic, and StarOffice Basic.
    """

    name = 'VB.net'
    url = 'https://docs.microsoft.com/en-us/dotnet/visual-basic/'
    aliases = ['vb.net', 'vbnet', 'lobas', 'oobas', 'sobas']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)

    # VB identifiers: unicode letter categories, digits only after the first char
    uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
               '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                 'Cf', 'Mn', 'Mc') + ']*'

    # VB keywords are case-insensitive
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Whitespace),
            (r'\n', Whitespace),
            (r'(rem\b.*?)(\n)', bygroups(Comment, Whitespace)),
            (r"('.*?)(\n)", bygroups(Comment, Whitespace)),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[(){}!#,.:]', Punctuation),
            (r'(Option)(\s+)(Strict|Explicit|Compare)(\s+)'
             r'(On|Off|Binary|Text)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
                      Whitespace, Keyword.Declaration)),
            # plain keywords; the (?<!\.) prefix keeps member accesses like
            # obj.If from being highlighted as keywords
            (words((
                'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
                'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
                'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
                'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
                'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
                'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
                'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
                'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
                'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
                'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
                'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
                'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
                'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
                'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
                'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
                'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
                'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
                'Widening', 'With', 'WithEvents', 'WriteOnly'),
                prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
            # 'End <thing>' is handled in a sub-state to tag the second word
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Whitespace), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Whitespace), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Whitespace), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
             Operator),
            ('"', String, 'string'),
            (r'(_)(\n)', bygroups(Text, Whitespace)),  # Line continuation (must be before Name)
            (uni_name + '[%&@!#$]?', Name),  # optional type-suffix character
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
        ],
        'string': [
            (r'""', String),  # doubled quote escapes a quote
            (r'"C?', String, '#pop'),  # closing quote, optionally a Char literal
            (r'[^"]+', String),
        ],
        'dim': [
            (uni_name, Name.Variable, '#pop'),
            default('#pop'),  # any other syntax
        ],
        'funcname': [
            (uni_name, Name.Function, '#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'namespace': [
            (uni_name, Name.Namespace),
            (r'\.', Name.Namespace),
            default('#pop'),
        ],
        'end': [
            (r'\s+', Whitespace),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        # Constructs that start a line only in VB-family sources.
        if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
            return 0.5
+
+
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.

    Tags server-side blocks (``<% ... %>`` and ``<script>`` bodies) as
    `Other` so a `DelegatingLexer` subclass can hand them to a language
    lexer, while everything else is lexed as XML.
    """

    name = 'aspx-gen'
    filenames = []
    mimetypes = []

    flags = re.DOTALL

    tokens = {
        'root': [
            # <% ... %>, <%= ... %>, <%@ ... %>, <%# ... %> blocks
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            # plain markup up to the next tag, then whatever remains
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
+
+
+# TODO support multiple languages within the same source file
+class CSharpAspxLexer(DelegatingLexer):
+ """
+ Lexer for highlighting C# within ASP.NET pages.
+ """
+
+ name = 'aspx-cs'
+ aliases = ['aspx-cs']
+ filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
+ mimetypes = []
+
+ def __init__(self, **options):
+ super().__init__(CSharpLexer, GenericAspxLexer, **options)
+
+ def analyse_text(text):
+ if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
+ return 0.2
+ elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
+ return 0.15
+
+
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.

    Non-markup regions found by `GenericAspxLexer` are delegated to
    `VbNetLexer`.
    """

    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super().__init__(VbNetLexer, GenericAspxLexer, **options)

    def analyse_text(text):
        # A page-level Language directive is the strongest hint; a script
        # block declaring VB is a slightly weaker one.
        if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
            return 0.2
        if re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
            return 0.15
+
+
+# Very close to functional.OcamlLexer
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).

    .. versionadded:: 1.5
    """

    name = 'F#'
    url = 'https://fsharp.org/'
    aliases = ['fsharp', 'f#']
    filenames = ['*.fs', '*.fsi', '*.fsx']
    mimetypes = ['text/x-fsharp']

    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    # Symbolic operators/punctuation; longer alternatives listed first so the
    # joined alternation prefers them.
    keyopts = [
        '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
        '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
        '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]

    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.

    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Whitespace),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            # capitalized name followed by '.' starts a dotted module path
            (r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            (r'(///.*?)(\n)', bygroups(String.Doc, Whitespace)),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'\(\*(?!\))', Comment, 'comment'),  # (*) alone is an operator

            (r'@"', String, 'lstring'),   # verbatim string
            (r'"""', String, 'tqs'),      # triple-quoted string
            (r'"', String, 'string'),

            (r'\b(open|module)(\s+)([\w.]+)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'\b(let!?)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Variable)),
            (r'\b(type)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
             bygroups(Keyword, Whitespace, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),  # quoted identifier
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'(#)([ \t]*)(if|endif|else|line|nowarn|light|\d+)\b(.*?)(\n)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
                      Comment.Preproc, Whitespace)),

            (r"[^\W\d][\w']*", Name),

            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
            # The '.' must be escaped: the original unescaped '.' matched any
            # character, so e.g. "1x3e4" was lexed as a single float literal.
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            # NOTE: a former rule  @?"  here was unreachable -- both '@"' and
            # '"' are consumed by the string rules earlier in this state -- and
            # has been removed.

            (r'[~?][a-z][\w\']*:', Name.Variable),  # optional/byref parameter labels
        ],
        'dotted': [
            (r'\s+', Whitespace),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            # e.g. dictionary index access
            default('#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),  # comments nest
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),  # trailing B marks a byte string
        ],
        'lstring': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),  # doubled quote escapes a quote
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }

    def analyse_text(text):
        """F# doesn't have that many unique features -- |> and <| are weak
        indicators."""
        result = 0
        if '|>' in text:
            result += 0.05
        if '<|' in text:
            result += 0.05

        return result
diff --git a/pygments/lexers/dsls.py b/pygments/lexers/dsls.py
new file mode 100644
index 0000000..ea16766
--- /dev/null
+++ b/pygments/lexers/dsls.py
@@ -0,0 +1,981 @@
+"""
+ pygments.lexers.dsls
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various domain-specific languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
+ include, default, this, using, combined
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['ProtoBufLexer', 'ZeekLexer', 'PuppetLexer', 'RslLexer',
+ 'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
+ 'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
+
+
class ProtoBufLexer(RegexLexer):
    """
    Lexer for Protocol Buffer definition files.

    .. versionadded:: 1.4
    """

    name = 'Protocol Buffer'
    url = 'https://developers.google.com/protocol-buffers/'
    aliases = ['protobuf', 'proto']
    filenames = ['*.proto']

    tokens = {
        'root': [
            (r'[ \t]+', Whitespace),
            (r'[,;{}\[\]()<>]', Punctuation),
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (words((
                'import', 'option', 'optional', 'required', 'repeated',
                'reserved', 'default', 'packed', 'ctype', 'extensions', 'to',
                'max', 'rpc', 'returns', 'oneof', 'syntax'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
                'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'package'),
            (r'(message|extend)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'message'),
            (r'(enum|group|service)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'type'),
            (r'\".*?\"', String),
            (r'\'.*?\'', String),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'(\-?(inf|nan))\b', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # The dash must come first in the class: the original `[+-=]`
            # formed an unintended character range U+002B-U+003D, which also
            # matched ',', '.', '/', digits, ':', ';' and '<' as Operator.
            (r'[-+=]', Operator),
            # field definition: name followed by '='
            (r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            (r'[a-zA-Z_][\w.]*', Name),
        ],
        'package': [
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'message': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'type': [
            (r'[a-zA-Z_]\w*', Name, '#pop'),
            default('#pop'),
        ],
    }
+
+
class ThriftLexer(RegexLexer):
    """
    For Thrift interface definitions.

    .. versionadded:: 2.1
    """
    name = 'Thrift'
    url = 'https://thrift.apache.org/'
    aliases = ['thrift']
    filenames = ['*.thrift']
    mimetypes = ['application/x-thrift']

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            # strings combine the shared escape rules with the quote-specific state
            (r'"', String.Double, combined('stringescape', 'dqs')),
            (r'\'', String.Single, combined('stringescape', 'sqs')),
            (r'(namespace)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'namespace'),
            (r'(enum|union|struct|service|exception)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)'  # return arguments
             r'((?:[^\W\d]|\$)[\w$]*)'                  # method name
             r'(\s*)(\()',                              # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            include('keywords'),
            include('numbers'),
            (r'[&=]', Operator),
            (r'[:;,{}()<>\[\]]', Punctuation),
            (r'[a-zA-Z_](\.\w|\w)*', Name),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
        ],
        'comments': [
            (r'#.*$', Comment),
            (r'//.*?\n', Comment),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
        ],
        'stringescape': [
            (r'\\([\\nrt"\'])', String.Escape),
        ],
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'[^\\"\n]+', String.Double),
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r'[^\\\'\n]+', String.Single),
        ],
        'namespace': [
            # '*' is allowed: it is Thrift's wildcard namespace scope
            (r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'keywords': [
            (r'(async|oneway|extends|throws|required|optional)\b', Keyword),
            (r'(true|false)\b', Keyword.Constant),
            (r'(const|typedef)\b', Keyword.Declaration),
            (words((
                'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
                'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
                'php_namespace', 'py_module', 'perl_package',
                'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
                'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
                'xsd_attrs', 'include'), suffix=r'\b'),
             Keyword.Namespace),
            (words((
                'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
                'string', 'binary', 'map', 'list', 'set', 'slist',
                'senum'), suffix=r'\b'),
             Keyword.Type),
            # words reserved by target languages Thrift generates code for
            (words((
                'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
                '__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
                'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
                'break', 'case', 'catch', 'class', 'clone', 'continue',
                'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
                'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
                'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
                'ensure', 'except', 'exec', 'finally', 'float', 'for',
                'foreach', 'function', 'global', 'goto', 'if', 'implements',
                'import', 'in', 'inline', 'instanceof', 'interface', 'is',
                'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
                'or', 'pass', 'public', 'print', 'private', 'protected',
                'raise', 'redo', 'rescue', 'retry', 'register', 'return',
                'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
                'then', 'this', 'throw', 'transient', 'try', 'undef',
                'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
                'volatile', 'when', 'while', 'with', 'xor', 'yield'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
        ],
        'numbers': [
            (r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
            (r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
    }
+
+
class ZeekLexer(RegexLexer):
    """
    For Zeek scripts.

    .. versionadded:: 2.5
    """
    name = 'Zeek'
    url = 'https://www.zeek.org/'
    aliases = ['zeek', 'bro']
    filenames = ['*.zeek', '*.bro']

    # Regex fragments reused in the literal rules below.
    _hex = r'[0-9a-fA-F]'
    _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            include('directives'),
            include('attributes'),
            include('types'),
            include('keywords'),
            include('literals'),
            include('operators'),
            include('punctuation'),
            # A (possibly namespaced) identifier directly followed by '('
            # is a function call or definition name.
            (r'((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(?=\s*\()',
                Name.Function),
            include('identifiers'),
        ],

        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # Line continuation.
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],

        'comments': [
            (r'#.*$', Comment),
        ],

        'directives': [
            (r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc),
            (r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc),
            (r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc,
                Whitespace, Comment.Preproc)),
        ],

        'attributes': [
            # '&'-prefixed attributes, e.g. '&redef', '&log'.
            (words(('redef', 'priority', 'log', 'optional', 'default', 'add_func',
                    'delete_func', 'expire_func', 'read_expire', 'write_expire',
                    'create_expire', 'synchronized', 'persistent', 'rotate_interval',
                    'rotate_size', 'encrypt', 'raw_output', 'mergeable', 'error_handler',
                    'type_column', 'deprecated'),
                   prefix=r'&', suffix=r'\b'),
             Keyword.Pseudo),
        ],

        'types': [
            (words(('any',
                    'enum', 'record', 'set', 'table', 'vector',
                    'function', 'hook', 'event',
                    'addr', 'bool', 'count', 'double', 'file', 'int', 'interval',
                    'pattern', 'port', 'string', 'subnet', 'time'),
                   suffix=r'\b'),
             Keyword.Type),

            # 'opaque of <type>' parameterized type.
            (r'(opaque)(\s+)(of)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
             bygroups(Keyword.Type, Whitespace, Operator.Word, Whitespace, Keyword.Type)),

            # 'type Name: record/enum' defines a new aggregate type.
            (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)(\s*)\b(record|enum)\b',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Keyword.Type)),

            # 'type Name: <other type>' alias definition.
            (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)',
             bygroups(Keyword, Whitespace, Name, Whitespace, Operator)),

            # 'redef record/enum Name' extends an existing type.
            (r'(redef)(\s+)(record|enum)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
             bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Name.Class)),
        ],

        'keywords': [
            (words(('redef', 'export', 'if', 'else', 'for', 'while',
                    'return', 'break', 'next', 'continue', 'fallthrough',
                    'switch', 'default', 'case',
                    'add', 'delete',
                    'when', 'timeout', 'schedule'),
                   suffix=r'\b'),
             Keyword),
            (r'(print)\b', Keyword),
            (r'(global|local|const|option)\b', Keyword.Declaration),
            (r'(module)(\s+)(([A-Za-z_]\w*)(?:::([A-Za-z_]\w*))*)\b',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
        ],

        'literals': [
            (r'"', String, 'string'),

            # Not the greatest match for patterns, but generally helps
            # disambiguate between start of a pattern and just a division
            # operator.
            (r'/(?=.*/)', String.Regex, 'regex'),

            (r'(T|F)\b', Keyword.Constant),

            # Port
            (r'\d{1,5}/(udp|tcp|icmp|unknown)\b', Number),

            # IPv4 Address.  The dots must be escaped: with a bare '.',
            # any separator character (e.g. '1a2b3c4') matched as well.
            (r'(\d{1,3}\.){3}(\d{1,3})\b', Number),

            # IPv6 Address (optionally with an embedded IPv4-style tail;
            # the dots there are escaped for the same reason as above).
            (r'\[([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?((\d{1,3}\.){3}(\d{1,3}))?\]', Number),

            # Numeric
            (r'0[xX]' + _hex + r'+\b', Number.Hex),
            # Time intervals, e.g. '5 mins'.
            (_float + r'\s*(day|hr|min|sec|msec|usec)s?\b', Number.Float),
            (_float + r'\b', Number.Float),
            (r'(\d+)\b', Number.Integer),

            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
        ],

        'operators': [
            (r'[!%*/+<=>~|&^-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|as|is|of)\b', Operator.Word),
            # '$' field access / constructor argument, '?$' field test.
            (r'\??\$', Operator),
        ],

        'punctuation': [
            (r'[{}()\[\],;.]', Punctuation),
            # The "ternary if", which uses '?' and ':', could instead be
            # treated as an Operator, but colons are more frequently used to
            # separate field/identifier names from their types, so the (often)
            # less-prominent Punctuation is used even with '?' for consistency.
            (r'[?:]', Punctuation),
        ],

        'identifiers': [
            (r'([a-zA-Z_]\w*)(::)', bygroups(Name, Punctuation)),
            (r'[a-zA-Z_]\w*', Name)
        ],

        'string': [
            (r'\\.', String.Escape),
            # printf-style format specifiers inside strings.
            (r'%-?[0-9]*(\.[0-9]+)?[DTd-gsx]', String.Escape),
            (r'"', String, '#pop'),
            (r'.', String),
        ],

        'regex': [
            (r'\\.', String.Escape),
            (r'/', String.Regex, '#pop'),
            (r'.', String.Regex),
        ],
    }


# Backwards-compatible alias: Zeek was formerly named Bro.
BroLexer = ZeekLexer
+
+
class PuppetLexer(RegexLexer):
    """
    For Puppet configuration DSL.

    .. versionadded:: 1.6
    """
    name = 'Puppet'
    url = 'https://puppet.com/'
    aliases = ['puppet']
    filenames = ['*.pp']

    tokens = {
        'root': [
            include('comments'),
            include('keywords'),
            include('names'),
            include('numbers'),
            include('operators'),
            include('strings'),

            (r'[]{}:(),;[]', Punctuation),
            (r'\s+', Whitespace),
        ],

        'comments': [
            (r'(\s*)(#.*)$', bygroups(Whitespace, Comment)),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],

        'operators': [
            (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
            (r'(in|and|or|not)\b', Operator.Word),
        ],

        'names': [
            (r'[a-zA-Z_]\w*', Name.Attribute),
            # $variable[index] subscript access.
            (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
                                               String, Punctuation)),
            (r'\$\S+', Name.Variable),
        ],

        'numbers': [
            # Copypasta from the Python lexer
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],

        'keywords': [
            # Left out 'group' and 'require'
            # Since they're often used as attributes
            (words((
                'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
                'check', 'class', 'computer', 'configured', 'contained',
                'create_resources', 'crit', 'cron', 'debug', 'default',
                'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
                'err', 'exec', 'extlookup', 'fail', 'false', 'file',
                'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
                'include', 'info', 'inherits', 'inline_template', 'installed',
                'interface', 'k5login', 'latest', 'link', 'loglevel',
                'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
                'mount', 'mounted', 'nagios_command', 'nagios_contact',
                'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
                'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
                'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
                'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
                'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
                'realize', 'regsubst', 'resources', 'role', 'router', 'running',
                'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
                'service', 'sha1', 'shellquote', 'split', 'sprintf',
                'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
                'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
                'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
                'zpool'), prefix='(?i)', suffix=r'\b'),
             Keyword),
        ],

        'strings': [
            # Allow backslash escapes inside double quotes (notably \" and
            # \\); previously an escaped quote terminated the string early,
            # while the single-quote rule below already handled \'.
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'(\\'|[^'])*'", String),
        ],

    }
+
+
class RslLexer(RegexLexer):
    """
    RSL is the formal specification
    language used in RAISE (Rigorous Approach to Industrial Software Engineering)
    method.

    .. versionadded:: 2.0
    """
    name = 'RSL'
    url = 'http://en.wikipedia.org/wiki/RAISE'
    aliases = ['rsl']
    filenames = ['*.rsl']
    mimetypes = ['text/rsl']

    # DOTALL lets the non-greedy comment rules below span multiple lines.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # Reserved words, tried before plain identifiers.
            (words((
                'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
                'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
                'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
                'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
                'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
                'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
                'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
                'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
                'type', 'union', 'until', 'use', 'value', 'variable', 'while',
                'with', 'write', '~isin', '-inflist', '-infset', '-list',
                '-set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'(variable|value)\b', Keyword.Declaration),
            # The comment syntaxes: '--' line comments plus the bracketed
            # multi-line forms '<: ... :>', '{! ... !}' and '/* ... */'.
            (r'--.*?\n', Comment),
            (r'<:.*?:>', Comment),
            (r'\{!.*?!\}', Comment),
            (r'/\*.*?\*/', Comment),
            # 'name :' at the start of a line introduces a declaration.
            (r'^([ \t]*)([\w]+)([ \t]*)(:[^:])', bygroups(Whitespace,
                Name.Function, Whitespace, Name.Function)),
            # 'name(args) is/as' introduces a function definition.
            (r'(^[ \t]*)([\w]+)([ \t]*)(\([\w\s,]*\))([ \t]*)(is|as)',
             bygroups(Whitespace, Name.Function, Whitespace, Text,
                      Whitespace, Keyword)),
            # Capitalized identifiers are type names by convention.
            (r'\b[A-Z]\w*\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'".*"', String),
            (r'\'.\'', String.Char),
            # Multi-character RSL operators.
            (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
             r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
             Operator),
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),
            # Fallback for any single character not matched above.
            (r'.', Text),
        ],
    }

    def analyse_text(text):
        """
        Check for the most common text in the beginning of a RSL file.
        """
        # 'scheme ... = class type' is the canonical opening of an RSL spec.
        if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
            return 1.0
+
+
class MscgenLexer(RegexLexer):
    """
    For Mscgen files.

    .. versionadded:: 1.6
    """
    name = 'Mscgen'
    url = 'http://www.mcternan.me.uk/mscgen/'
    aliases = ['mscgen', 'msc']
    filenames = ['*.msc']

    # An entity or attribute value: a bare word or a double-quoted string
    # (with \" escapes allowed inside the quotes).
    _var = r'(\w+|"(?:\\"|[^"])*")'

    tokens = {
        'root': [
            (r'msc\b', Keyword.Type),
            # Options
            (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
             r'|arcgradient|ARCGRADIENT)\b', Name.Property),
            # Operators
            (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
            # '...', '---', '|||' gap/divider arcs.
            (r'(\.|-|\|){3}', Keyword),
            # Message arc operators; longer forms are listed first so they
            # are matched before their prefixes.
            (r'(?:-|=|\.|:){2}'
             r'|<<=>>|<->|<=>|<<>>|<:>'
             r'|->|=>>|>>|=>|:>|-x|-X'
             r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
            # Names
            (r'\*', Name.Builtin),
            (_var, Name.Variable),
            # Other
            (r'\[', Punctuation, 'attrs'),
            (r'\{|\}|,|;', Punctuation),
            include('comments')
        ],
        # Inside a '[...]' attribute list: 'name = value' pairs.
        'attrs': [
            (r'\]', Punctuation, '#pop'),
            (_var + r'(\s*)(=)(\s*)' + _var,
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace,
                      String)),
            (r',', Punctuation),
            include('comments')
        ],
        'comments': [
            (r'(?://|#).*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'[ \t\r\n]+', Whitespace)
        ]
    }
+
+
class VGLLexer(RegexLexer):
    """
    For SampleManager VGL source code.

    .. versionadded:: 1.6
    """
    name = 'VGL'
    url = 'http://www.thermoscientific.com/samplemanager'
    aliases = ['vgl']
    filenames = ['*.rpf']

    # VGL keywords are case-insensitive; DOTALL keeps the rules simple.
    flags = re.MULTILINE | re.DOTALL | re.IGNORECASE

    tokens = {
        'root': [
            # '{ ... }' is the comment syntax.
            (r'\{[^}]*\}', Comment.Multiline),
            (r'declare', Keyword.Constant),
            # Keywords, but only when not used as ordinary identifiers
            # (i.e. not directly followed by '=', '<', '.', ',', '(' etc.).
            (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
             r'|create|on|line|with|global|routine|value|endroutine|constant'
             r'|global|set|join|library|compile_option|file|exists|create|copy'
             r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
             Keyword),
            (r'(true|false|null|empty|error|locked)', Keyword.Constant),
            (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
            (r'"[^"]*"', String),
            # '.attribute' member access.
            (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
            (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
            (r'[a-z_$][\w$]*', Name),
            (r'[\r\n]+', Whitespace),
            (r'\s+', Whitespace)
        ]
    }
+
+
class AlloyLexer(RegexLexer):
    """
    For Alloy source code.

    .. versionadded:: 2.0
    """

    name = 'Alloy'
    url = 'http://alloy.mit.edu'
    aliases = ['alloy']
    filenames = ['*.als']
    mimetypes = ['text/x-alloy']

    flags = re.MULTILINE | re.DOTALL

    # Identifiers may end with primes (").
    iden_rex = r'[a-zA-Z_][\w]*"*'
    string_rex = r'"\b(\\\\|\\[^\\]|[^"\\])*"'
    # Reusable rule for intra-line whitespace.
    text_tuple = (r'[^\S\n]+', Whitespace)

    tokens = {
        # After 'sig'/'enum': signature names, optional 'extends' clause,
        # until the opening '{' of the body.
        'sig': [
            (r'(extends)\b', Keyword, '#pop'),
            (iden_rex, Name),
            text_tuple,
            (r',', Punctuation),
            (r'\{', Operator, '#pop'),
        ],
        # After 'module'/'open': the module name.
        'module': [
            text_tuple,
            (iden_rex, Name, '#pop'),
        ],
        # After 'fun'/'pred'/'assert': optional name, until the body opens.
        'fun': [
            text_tuple,
            (r'\{', Operator, '#pop'),
            (iden_rex, Name, '#pop'),
        ],
        # 'fact' additionally allows a string as its name.
        'fact': [
            include('fun'),
            (string_rex, String, '#pop'),
        ],
        'root': [
            (r'--.*?$', Comment.Single),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            text_tuple,
            (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Whitespace),
                'module'),
            (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'sig'),
            (r'(iden|univ|none)\b', Keyword.Constant),
            (r'(int|Int)\b', Keyword.Type),
            (r'(var|this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
            (r'(all|some|no|sum|disj|when|else)\b', Keyword),
            (r'(run|check|for|but|exactly|expect|as|steps)\b', Keyword),
            (r'(always|after|eventually|until|release)\b', Keyword),  # future time operators
            (r'(historically|before|once|since|triggered)\b', Keyword),  # past time operators
            (r'(and|or|implies|iff|in)\b', Operator.Word),
            (r'(fun|pred|assert)(\s+)', bygroups(Keyword, Whitespace), 'fun'),
            (r'(fact)(\s+)', bygroups(Keyword, Whitespace), 'fact'),
            # Multi-character operators before the single-character ones.
            (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.\.|\.|->', Operator),
            (r'[-+/*%=<>&!^|~{}\[\]().\';]', Operator),
            (iden_rex, Name),
            (r'[:,]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (string_rex, String),
            (r'\n', Whitespace),
        ]
    }
+
+
class PanLexer(RegexLexer):
    """
    Lexer for pan source files.

    Based on tcsh lexer.

    .. versionadded:: 2.0
    """

    name = 'Pan'
    url = 'https://github.com/quattor/pan/'
    aliases = ['pan']
    filenames = ['*.pan']

    tokens = {
        'root': [
            include('basic'),
            # Bracketed sub-expressions get their own states so the
            # closing delimiter can be recognized.
            (r'\(', Keyword, 'paren'),
            (r'\{', Keyword, 'curly'),
            include('data'),
        ],
        'basic': [
            # Language keywords.
            (words((
                'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
                'prefix', 'unique', 'object', 'foreach', 'include', 'template',
                'function', 'variable', 'structure', 'extensible', 'declaration'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            # Built-in functions.
            (words((
                'file_contents', 'format', 'index', 'length', 'match', 'matches',
                'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
                'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
                'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
                'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
                'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
                'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
                'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
                'path_exists', 'if_exists', 'return', 'value'),
                prefix=r'\b', suffix=r'\b'),
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            # 'name =' assignment.
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
            (r'[\[\]{}()=]+', Operator),
            # Heredoc-style string.
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Whitespace),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            (r'\d+(?= |\Z)', Number),
        ],
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
    }
+
+
class CrmshLexer(RegexLexer):
    """
    Lexer for crmsh configuration files for Pacemaker clusters.

    .. versionadded:: 2.1
    """
    name = 'Crmsh'
    url = 'http://crmsh.github.io/'
    aliases = ['crmsh', 'pcmk']
    filenames = ['*.crmsh', '*.pcmk']
    mimetypes = []

    # Word classes below use the (?![\w#$-]) suffix so a keyword is not
    # matched when it is merely a prefix of a longer identifier.
    # Top-level configuration elements.
    elem = words((
        'node', 'primitive', 'group', 'clone', 'ms', 'location',
        'colocation', 'order', 'fencing_topology', 'rsc_ticket',
        'rsc_template', 'property', 'rsc_defaults',
        'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
        'tag'), suffix=r'(?![\w#$-])')
    # Sub-element keywords (inside a resource definition).
    sub = words((
        'params', 'meta', 'operations', 'op', 'rule',
        'attributes', 'utilization'), suffix=r'(?![\w#$-])')
    acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
    # Rule-expression operators.
    bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
    un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
    date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
    acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
    bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
    val_qual = (r'(?:string|version|number)')
    rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
                       r'start|promote|demote|stop)')

    tokens = {
        'root': [
            (r'^(#.*)(\n)?', bygroups(Comment, Whitespace)),
            # attr=value (nvpair)
            (r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
                bygroups(Name.Attribute, Punctuation, String)),
            # need this construct, otherwise numeric node ids
            # are matched as scores
            # elem id:
            (r'(node)(\s+)([\w#$-]+)(:)',
                bygroups(Keyword, Whitespace, Name, Punctuation)),
            # scores
            (r'([+-]?([0-9]+|inf)):', Number),
            # keywords (elements and other)
            (elem, Keyword),
            (sub, Keyword),
            (acl, Keyword),
            # binary operators
            (r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
            # other operators
            (bin_rel, Operator.Word),
            (un_ops, Operator.Word),
            (date_exp, Operator.Word),
            # builtin attributes (e.g. #uname)
            (r'#[a-z]+(?![\w#$-])', Name.Builtin),
            # acl_mod:blah
            (r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
                bygroups(Keyword, Punctuation, Name)),
            # rsc_id[:(role|action)]
            # NB: this matches all other identifiers
            (r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
                bygroups(Name, Punctuation, Operator.Word)),
            # punctuation
            (r'(\\(?=\n)|[\[\](){}/:@])', Punctuation),
            (r'\s+|\n', Whitespace),
        ],
    }
+
+
class FlatlineLexer(RegexLexer):
    """
    Lexer for Flatline expressions.

    .. versionadded:: 2.2
    """
    name = 'Flatline'
    url = 'https://github.com/bigmlcom/flatline'
    aliases = ['flatline']
    filenames = []
    mimetypes = ['text/x-flatline']

    special_forms = ('let',)

    # Built-in Flatline functions and operators (lisp-style names).
    builtins = (
        "!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
        "all-but", "all-with-defaults", "all-with-numeric-default", "and",
        "asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
        "category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
        "count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
        "epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
        "epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
        "epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
        "first", "floor", "head", "if", "in", "integer", "language", "length",
        "levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
        "matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
        "minimum", "missing", "missing-count", "missing?", "missing_count",
        "mod", "mode", "normalize", "not", "nth", "occurrences", "or",
        "percentile", "percentile-label", "population", "population-fraction",
        "pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
        "random-value", "re-quote", "real", "replace", "replace-first", "rest",
        "round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
        "sqrt", "square", "standard-deviation", "standard_deviation", "str",
        "subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
        "summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
        "to-radians", "variance", "vectorize", "weighted-random-value", "window",
        "winnow", "within-percentiles?", "z-score",
    )

    # Lisp-style symbols: most punctuation is allowed except a leading '#'.
    valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'

    tokens = {
        'root': [
            # whitespaces - usually not relevant
            (r'[,]+', Text),
            (r'\s+', Whitespace),

            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (r'0x-?[a-f\d]+', Number.Hex),

            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"\\(.|[a-z]+)", String.Char),

            # expression template placeholder
            (r'_', String.Symbol),

            # highlight the special forms
            (words(special_forms, suffix=' '), Keyword),

            # highlight the builtins
            (words(builtins, suffix=' '), Name.Builtin),

            # the remaining functions
            (r'(?<=\()' + valid_name, Name.Function),

            # find the remaining variables
            (valid_name, Name.Variable),

            # parentheses
            (r'(\(|\))', Punctuation),
        ],
    }
+
+
class SnowballLexer(ExtendedRegexLexer):
    """
    Lexer for Snowball source code.

    .. versionadded:: 2.2
    """

    name = 'Snowball'
    url = 'http://snowballstem.org/'
    aliases = ['snowball']
    filenames = ['*.sbl']

    # Characters Snowball treats as whitespace (used to build regexes).
    _ws = r'\n\r\t '

    def __init__(self, **options):
        # The escape delimiters can be redefined by the 'stringescapes'
        # directive, so they are kept as per-instance state.
        self._reset_stringescapes()
        ExtendedRegexLexer.__init__(self, **options)

    def _reset_stringescapes(self):
        # Default delimiters before any 'stringescapes' directive is seen.
        self._start = "'"
        self._end = "'"

    def _string(do_string_first):
        # Build a context callback that tokenizes string contents,
        # alternating between literal text (up to the next opening escape
        # delimiter or closing quote) and escape sequences (up to the
        # closing escape delimiter).  do_string_first says which of the
        # two modes the chunk starts in.
        def callback(lexer, match, ctx):
            s = match.start()
            text = match.group()
            # Matchers for "text up to the next delimiter" built from the
            # current (dynamic) stringescapes delimiters.
            string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match
            escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match
            pos = 0
            do_string = do_string_first
            while pos < len(text):
                if do_string:
                    match = string(text, pos)
                    yield s + match.start(1), String.Single, match.group(1)
                    if match.group(2) == "'":
                        # Closing quote: the string literal is complete.
                        yield s + match.start(2), String.Single, match.group(2)
                        ctx.stack.pop()
                        break
                    yield s + match.start(2), String.Escape, match.group(2)
                    pos = match.end()
                match = escape(text, pos)
                yield s + match.start(), String.Escape, match.group()
                if match.group(2) != lexer._end:
                    # The escape ran to the end of this chunk without a
                    # closing delimiter: continue in the 'escape' state.
                    ctx.stack[-1] = 'escape'
                    break
                pos = match.end()
                do_string = True
            ctx.pos = s + match.end()
        return callback

    def _stringescapes(lexer, match, ctx):
        # Record the delimiters declared by a 'stringescapes' directive
        # for use by the _string callbacks above.
        lexer._start = match.group(3)
        lexer._end = match.group(5)
        return bygroups(Keyword.Reserved, Whitespace, String.Escape, Whitespace,
                        String.Escape)(lexer, match, ctx)

    tokens = {
        'root': [
            # 'len'/'lenof' act as operators only at the top level; inside
            # a declaration list they are ordinary names (see below).
            (words(('len', 'lenof'), suffix=r'\b'), Operator.Word),
            include('root1'),
        ],
        'root1': [
            (r'[%s]+' % _ws, Whitespace),
            (r'\d+', Number.Integer),
            (r"'", String.Single, 'string'),
            (r'[()]', Punctuation),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*', Comment.Single),
            (r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
            (words(('as', 'get', 'hex', 'among', 'define', 'decimal',
                    'backwardmode'), suffix=r'\b'),
             Keyword.Reserved),
            (words(('strings', 'booleans', 'integers', 'routines', 'externals',
                    'groupings'), suffix=r'\b'),
             Keyword.Reserved, 'declaration'),
            (words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
                    'fail', 'goto', 'loop', 'next', 'test', 'true',
                    'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
                    'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
                    'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
                    'backwards', 'substring'), suffix=r'\b'),
             Operator.Word),
            (words(('size', 'limit', 'cursor', 'maxint', 'minint'),
                   suffix=r'\b'),
             Name.Builtin),
            (r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws),
             bygroups(Keyword.Reserved, Whitespace, String.Escape)),
            (r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws),
             _stringescapes),
            (r'[A-Za-z]\w*', Name),
        ],
        'declaration': [
            (r'\)', Punctuation, '#pop'),
            (words(('len', 'lenof'), suffix=r'\b'), Name,
             ('root1', 'declaration')),
            include('root1'),
        ],
        # A string whose current chunk starts in literal-text mode.
        'string': [
            (r"[^']*'", _string(True)),
        ],
        # A string whose current chunk starts inside an escape sequence.
        'escape': [
            (r"[^']*'", _string(False)),
        ],
    }

    def get_tokens_unprocessed(self, text=None, context=None):
        # Reset the delimiters so state from a previous document does not
        # leak into this one.
        self._reset_stringescapes()
        return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)
diff --git a/pygments/lexers/dylan.py b/pygments/lexers/dylan.py
new file mode 100644
index 0000000..4d0acb9
--- /dev/null
+++ b/pygments/lexers/dylan.py
@@ -0,0 +1,287 @@
+"""
+ pygments.lexers.dylan
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Dylan language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
+ default, line_re
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Literal, Whitespace
+
+__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
+
+
class DylanLexer(RegexLexer):
    """
    For the Dylan language.

    .. versionadded:: 0.7
    """

    name = 'Dylan'
    url = 'http://www.opendylan.org/'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']

    # Dylan names are case-insensitive; the set lookups in
    # get_tokens_unprocessed below therefore compare lowercased values.
    flags = re.IGNORECASE

    # Adjectives and reserved words used in definition macros.
    builtins = {
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'}

    # Statement and clause keywords.
    keywords = {
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'}

    # Operator names that lex as Name and are re-tagged below.
    # (The duplicate '|' that used to appear here was redundant in a set
    # literal and has been removed.)
    operators = {
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&'}

    # Common library functions from the Dylan Reference Manual.
    functions = {
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'}

    # Dylan names may contain most operator characters and may start with
    # a backslash (escaped operator names).
    valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'

    def get_tokens_unprocessed(self, text):
        """Re-tag generic Name tokens as builtins, keywords, functions or
        operators by case-insensitive membership in the sets above."""
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # lid header
            (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
                bygroups(Name.Attribute, Operator, Whitespace, String)),

            default('code')  # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Whitespace),

            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),

            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),

            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),

            # binary integer
            (r'#b[01]+', Number.Bin),

            # octal integer
            (r'#o[0-7]+', Number.Oct),

            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),

            # decimal integer
            (r'[-+]?\d+', Number.Integer),

            # hex integer
            (r'#x[0-9a-f]+', Number.Hex),

            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),

            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),

            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),

            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),

            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),

            # #rest, #key, #all-keys, etc.
            (r'#[a-z0-9-]+', Keyword),

            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),

            # class names
            ('<' + valid_name + '>', Name.Class),

            # define variable forms.
            (r'\*' + valid_name + r'\*', Name.Variable.Global),

            # define constant forms.
            (r'\$' + valid_name, Name.Constant),

            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol),  # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ]
    }
+
+
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.

    .. versionadded:: 1.6
    """

    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']

    # Header keywords are case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),

            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),

            # lid header: 'Keyword: value', where the value may continue
            # on following indented lines.
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
                bygroups(Name.Attribute, Operator, Whitespace, String)),
        ]
    }
+
+
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output like:

    .. sourcecode:: dylan-console

        ? let a = 1;
        => 1
        ? a
        => 1

    This is based on a copy of the RubyConsoleLexer.

    .. versionadded:: 1.6
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']

    # A line is console input if it starts with '? ' (prompt) or a
    # leading space (continuation).
    _prompt_re = re.compile(r'\?| ')

    def get_tokens_unprocessed(self, text):
        # Delegate highlighting of the input lines to a DylanLexer;
        # insertions keep the prompt tokens interleaved at the right
        # offsets within the accumulated code.
        dylexer = DylanLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                # A non-prompt line is program output; flush any pending
                # code through the Dylan lexer first.
                if curcode:
                    yield from do_insertions(insertions,
                                             dylexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            yield from do_insertions(insertions,
                                     dylexer.get_tokens_unprocessed(curcode))
diff --git a/pygments/lexers/ecl.py b/pygments/lexers/ecl.py
new file mode 100644
index 0000000..e092997
--- /dev/null
+++ b/pygments/lexers/ecl.py
@@ -0,0 +1,145 @@
+"""
+ pygments.lexers.ecl
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the ECL language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['ECLLexer']
+
+
+class ECLLexer(RegexLexer):
+    """
+    Lexer for the declarative big-data ECL language.
+
+    .. versionadded:: 1.5
+    """
+
+    name = 'ECL'
+    url = 'https://hpccsystems.com/training/documentation/ecl-language-reference/html'
+    aliases = ['ecl']
+    filenames = ['*.ecl']
+    mimetypes = ['application/x-ecl']
+
+    # ECL is case-insensitive; MULTILINE lets '^' anchor the preprocessor
+    # rule in the 'hash' state to line starts.
+    flags = re.IGNORECASE | re.MULTILINE
+
+    tokens = {
+        'root': [
+            include('whitespace'),
+            include('statements'),
+        ],
+        'whitespace': [
+            (r'\s+', Whitespace),
+            (r'\/\/.*', Comment.Single),
+            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
+        ],
+        'statements': [
+            # Order matters: type/keyword/function tables are tried before
+            # the generic identifier rule at the bottom.
+            include('types'),
+            include('keywords'),
+            include('functions'),
+            include('hash'),
+            (r'"', String, 'string'),
+            (r'\'', String, 'string'),
+            (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
+            (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
+            (r'0x[0-9a-f]+[lu]*', Number.Hex),
+            (r'0[0-7]+[lu]*', Number.Oct),
+            (r'\d+[lu]*', Number.Integer),
+            (r'[~!%^&*+=|?:<>/-]+', Operator),
+            (r'[{}()\[\],.;]', Punctuation),
+            (r'[a-z_]\w*', Name),
+        ],
+        'hash': [
+            (r'^#.*$', Comment.Preproc),
+        ],
+        'types': [
+            (r'(RECORD|END)\D', Keyword.Declaration),
+            # Sized types may carry a numeric width suffix (e.g. STRING20).
+            (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
+             r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
+             r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
+             bygroups(Keyword.Type, Whitespace)),
+        ],
+        'keywords': [
+            (words((
+                'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
+                'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
+                'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
+                'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
+                'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
+                'WAIT', 'WHEN'), suffix=r'\b'),
+             Keyword.Reserved),
+            # These are classed differently, check later
+            (words((
+                'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST',
+                'BETWEEN', 'CASE', 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT',
+                'ENDC++', 'ENDMACRO', 'EXCEPT', 'EXCLUSIVE', 'EXPIRE', 'EXPORT',
+                'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', 'FUNCTION',
+                'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN',
+                'JOINED', 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL',
+                'LOCALE', 'LOOKUP', 'MACRO', 'MANY', 'MAXCOUNT', 'MAXLENGTH',
+                'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', 'NOROOT',
+                'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER',
+                'OVERWRITE', 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH',
+                'PIPE', 'QUOTE', 'RELATIONSHIP', 'REPEAT', 'RETURN', 'RIGHT',
+                'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', 'SKIP',
+                'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN',
+                'TRANSFORM', 'TRIM', 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED',
+                'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', 'WITHIN', 'XML', 'XPATH',
+                '__COMPRESSED__'), suffix=r'\b'),
+             Keyword.Reserved),
+        ],
+        'functions': [
+            (words((
+                'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN',
+                'ATAN2', 'AVE', 'CASE', 'CHOOSE', 'CHOOSEN', 'CHOOSESETS',
+                'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', 'COSH', 'COUNT',
+                'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
+                'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH',
+                'ERROR', 'EVALUATE', 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS',
+                'EXP', 'FAILCODE', 'FAILMESSAGE', 'FETCH', 'FROMUNICODE',
+                'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
+                'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX',
+                'INTFORMAT', 'ISVALID', 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH',
+                'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', 'MAP', 'MATCHED',
+                'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', 'MAX',
+                'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE',
+                'PARSE', 'PIPE', 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL',
+                'RANDOM', 'RANGE', 'RANK', 'RANKED', 'REALFORMAT', 'RECORDOF',
+                'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', 'ROLLUP',
+                'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN',
+                'SINH', 'SIZEOF', 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED',
+                'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', 'THISNODE', 'TOPN',
+                'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
+                'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE',
+                'XMLENCODE', 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
+             Name.Function),
+        ],
+        'string': [
+            # Either quote character terminates; the state is entered for
+            # both '"' and "'" openers.
+            (r'"', String, '#pop'),
+            (r'\'', String, '#pop'),
+            (r'[^"\']+', String),
+        ],
+    }
+
+    def analyse_text(text):
+        """This is very difficult to guess relative to other business languages.
+        -> in conjunction with BEGIN/END seems relatively rare though."""
+        result = 0
+
+        if '->' in text:
+            result += 0.01
+        if 'BEGIN' in text:
+            result += 0.01
+        if 'END' in text:
+            result += 0.01
+
+        return result
diff --git a/pygments/lexers/eiffel.py b/pygments/lexers/eiffel.py
new file mode 100644
index 0000000..db7858c
--- /dev/null
+++ b/pygments/lexers/eiffel.py
@@ -0,0 +1,69 @@
+"""
+ pygments.lexers.eiffel
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Eiffel language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words, bygroups
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
+ Punctuation, Whitespace
+
+__all__ = ['EiffelLexer']
+
+
+class EiffelLexer(RegexLexer):
+    """
+    For Eiffel source code.
+
+    .. versionadded:: 2.0
+    """
+    name = 'Eiffel'
+    url = 'http://www.eiffel.com'
+    aliases = ['eiffel']
+    filenames = ['*.e']
+    mimetypes = ['text/x-eiffel']
+
+    tokens = {
+        'root': [
+            (r'[^\S\n]+', Whitespace),
+            (r'--.*?$', Comment.Single),
+            # NOTE(review): duplicate of the whitespace rule two lines up;
+            # this second occurrence can never match and looks redundant.
+            (r'[^\S\n]+', Whitespace),
+            # Please note that keyword and operator are case insensitive.
+            (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
+            (r'(?i)(not|xor|implies|or)\b', Operator.Word),
+            (r'(?i)(and)(?:(\s+)(then))?\b',
+             bygroups(Operator.Word, Whitespace, Operator.Word)),
+            (r'(?i)(or)(?:(\s+)(else))?\b',
+             bygroups(Operator.Word, Whitespace, Operator.Word)),
+            (words((
+                'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
+                'attribute', 'check', 'class', 'convert', 'create', 'debug',
+                'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
+                'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
+                'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
+                'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
+                'require', 'rescue', 'retry', 'select', 'separate', 'then',
+                'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
+             Keyword.Reserved),
+            # Verbatim strings "[ ... ]" (with '%' escapes), then plain strings.
+            (r'"\[([^\]%]|%(.|\n)|\][^"])*?\]"', String),
+            (r'"([^"%\n]|%.)*?"', String),
+            include('numbers'),
+            (r"'([^'%]|%'|%%)'", String.Char),
+            (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
+            (r"([{}():;,.])", Punctuation),
+            # All-caps identifiers are class names; anything with a lowercase
+            # letter is an ordinary name.
+            (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
+            (r'([A-Z][A-Z0-9_]*)', Name.Class),
+            (r'\n+', Whitespace),
+        ],
+        'numbers': [
+            (r'0[xX][a-fA-F0-9]+', Number.Hex),
+            (r'0[bB][01]+', Number.Bin),
+            # 0c prefix denotes octal in Eiffel.
+            (r'0[cC][0-7]+', Number.Oct),
+            (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
+            (r'[0-9]+', Number.Integer),
+        ],
+    }
diff --git a/pygments/lexers/elm.py b/pygments/lexers/elm.py
new file mode 100644
index 0000000..4174f2b
--- /dev/null
+++ b/pygments/lexers/elm.py
@@ -0,0 +1,124 @@
+"""
+ pygments.lexers.elm
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Elm programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include, bygroups
+from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
+ String, Whitespace
+
+__all__ = ['ElmLexer']
+
+
+class ElmLexer(RegexLexer):
+    """
+    For Elm source code.
+
+    .. versionadded:: 2.1
+    """
+
+    name = 'Elm'
+    url = 'http://elm-lang.org/'
+    aliases = ['elm']
+    filenames = ['*.elm']
+    mimetypes = ['text/x-elm']
+
+    # Ordinary (lowercase-initial) identifiers; primes are allowed.
+    validName = r'[a-z_][a-zA-Z0-9_\']*'
+
+    # The top-level 'main' definition gets keyword highlighting.
+    specialName = r'^main '
+
+    # Longest operators listed first so words() alternation prefers them.
+    builtinOps = (
+        '~', '||', '|>', '|', '`', '^', '\\', '\'', '>>', '>=', '>', '==',
+        '=', '<~', '<|', '<=', '<<', '<-', '<', '::', ':', '/=', '//', '/',
+        '..', '.', '->', '-', '++', '+', '*', '&&', '%',
+    )
+
+    reservedWords = words((
+        'alias', 'as', 'case', 'else', 'if', 'import', 'in',
+        'let', 'module', 'of', 'port', 'then', 'type', 'where',
+    ), suffix=r'\b')
+
+    tokens = {
+        'root': [
+
+            # Comments
+            (r'\{-', Comment.Multiline, 'comment'),
+            (r'--.*', Comment.Single),
+
+            # Whitespace
+            (r'\s+', Whitespace),
+
+            # Strings
+            (r'"', String, 'doublequote'),
+
+            # Modules
+            (r'^(\s*)(module)(\s*)', bygroups(Whitespace, Keyword.Namespace,
+                                              Whitespace), 'imports'),
+
+            # Imports
+            (r'^(\s*)(import)(\s*)', bygroups(Whitespace, Keyword.Namespace,
+                                              Whitespace), 'imports'),
+
+            # Shaders (inline GLSL blocks: [glsl| ... |])
+            (r'\[glsl\|.*', Name.Entity, 'shader'),
+
+            # Keywords
+            (reservedWords, Keyword.Reserved),
+
+            # Types
+            (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
+
+            # Main
+            (specialName, Keyword.Reserved),
+
+            # Prefix Operators, e.g. (+)
+            (words((builtinOps), prefix=r'\(', suffix=r'\)'), Name.Function),
+
+            # Infix Operators
+            (words(builtinOps), Name.Function),
+
+            # Numbers
+            include('numbers'),
+
+            # Variable Names
+            (validName, Name.Variable),
+
+            # Parens
+            (r'[,()\[\]{}]', Punctuation),
+
+        ],
+
+        'comment': [
+            # {- -} comments nest; '#push' handles the nesting depth.
+            (r'-(?!\})', Comment.Multiline),
+            (r'\{-', Comment.Multiline, 'comment'),
+            (r'[^-}]', Comment.Multiline),
+            (r'-\}', Comment.Multiline, '#pop'),
+        ],
+
+        'doublequote': [
+            (r'\\u[0-9a-fA-F]{4}', String.Escape),
+            (r'\\[nrfvb\\"]', String.Escape),
+            (r'[^"]', String),
+            (r'"', String, '#pop'),
+        ],
+
+        'imports': [
+            (r'\w+(\.\w+)*', Name.Class, '#pop'),
+        ],
+
+        'numbers': [
+            (r'_?\d+\.(?=\d+)', Number.Float),
+            (r'_?\d+', Number.Integer),
+        ],
+
+        'shader': [
+            (r'\|(?!\])', Name.Entity),
+            (r'\|\]', Name.Entity, '#pop'),
+            (r'(.*)(\n)', bygroups(Name.Entity, Whitespace)),
+        ],
+    }
diff --git a/pygments/lexers/elpi.py b/pygments/lexers/elpi.py
new file mode 100644
index 0000000..4fb3ace
--- /dev/null
+++ b/pygments/lexers/elpi.py
@@ -0,0 +1,165 @@
+"""
+ pygments.lexers.elpi
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the `Elpi <http://github.com/LPCIC/elpi>`_ programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number
+
+__all__ = ['ElpiLexer']
+
+
+class ElpiLexer(RegexLexer):
+    """
+    Lexer for the Elpi programming language.
+
+    .. versionadded:: 2.11
+    """
+
+    name = 'Elpi'
+    url = 'http://github.com/LPCIC/elpi'
+    aliases = ['elpi']
+    filenames = ['*.elpi']
+    mimetypes = ['text/x-elpi']
+
+    # Building blocks for Elpi's lexical grammar; the composed regexes
+    # below are assembled from these fragments.
+    lcase_re = r"[a-z]"
+    ucase_re = r"[A-Z]"
+    digit_re = r"[0-9]"
+    schar2_re = r"([+*^?/<>`'@#~=&!])"
+    schar_re = r"({}|-|\$|_)".format(schar2_re)
+    idchar_re = r"({}|{}|{}|{})".format(lcase_re,ucase_re,digit_re,schar_re)
+    idcharstarns_re = r"({}*(\.({}|{}){}*)*)".format(idchar_re, lcase_re, ucase_re, idchar_re)
+    symbchar_re = r"({}|{}|{}|{}|:)".format(lcase_re, ucase_re, digit_re, schar_re)
+    constant_re = r"({}{}*|{}{}|{}{}*|_{}+)".format(ucase_re, idchar_re, lcase_re, idcharstarns_re, schar2_re, symbchar_re, idchar_re)
+    symbol_re = r"(,|<=>|->|:-|;|\?-|->|&|=>|\bas\b|\buvar\b|<|=<|=|==|>=|>|\bi<|\bi=<|\bi>=|\bi>|\bis\b|\br<|\br=<|\br>=|\br>|\bs<|\bs=<|\bs>=|\bs>|@|::|\[\]|`->|`:|`:=|\^|-|\+|\bi-|\bi\+|r-|r\+|/|\*|\bdiv\b|\bi\*|\bmod\b|\br\*|~|\bi~|\br~)"
+    escape_re = r"\(({}|{})\)".format(constant_re,symbol_re)
+    const_sym_re = r"({}|{}|{})".format(constant_re,symbol_re,escape_re)
+
+    tokens = {
+        'root': [
+            include('elpi')
+        ],
+
+        'elpi': [
+            include('_elpi-comment'),
+
+            # Clause attributes, e.g. :before "name".
+            (r"(:before|:after|:if|:name)(\s*)(\")",
+             bygroups(Keyword.Mode, Text.Whitespace, String.Double),
+             'elpi-string'),
+            (r"(:index)(\s*\()", bygroups(Keyword.Mode, Text.Whitespace),
+             'elpi-indexing-expr'),
+            # Declarations: pred / type / kind / typeabbrev / accumulate / ...
+            (r"\b(external pred|pred)(\s+)({})".format(const_sym_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
+             'elpi-pred-item'),
+            (r"\b(external type|type)(\s+)(({}(,\s*)?)+)".format(const_sym_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
+             'elpi-type'),
+            (r"\b(kind)(\s+)(({}|,)+)".format(const_sym_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
+             'elpi-type'),
+            (r"\b(typeabbrev)(\s+)({})".format(const_sym_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
+             'elpi-type'),
+            (r"\b(accumulate)(\s+)(\")",
+             bygroups(Keyword.Declaration, Text.Whitespace, String.Double),
+             'elpi-string'),
+            (r"\b(accumulate|namespace|local)(\s+)({})".format(constant_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Text)),
+            (r"\b(shorten)(\s+)({}\.)".format(constant_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Text)),
+            # Binders: pi X\ ... and sigma X\ ...
+            (r"\b(pi|sigma)(\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\)",
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, Text)),
+            (r"\b(constraint)(\s+)(({}(\s+)?)+)".format(const_sym_re),
+             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
+             'elpi-chr-rule-start'),
+
+            # Capitalized / underscore-initial constants are logic variables.
+            (r"(?=[A-Z_]){}".format(constant_re), Name.Variable),
+            (r"(?=[a-z_]){}\\".format(constant_re), Name.Variable),
+            (r"_", Name.Variable),
+            (r"({}|!|=>|;)".format(symbol_re), Keyword.Declaration),
+            (constant_re, Text),
+            (r"\[|\]|\||=>", Keyword.Declaration),
+            (r'"', String.Double, 'elpi-string'),
+            (r'`', String.Double, 'elpi-btick'),
+            (r'\'', String.Double, 'elpi-tick'),
+            (r'\{[^\{]', Text, 'elpi-spill'),
+            (r"\(", Text, 'elpi-in-parens'),
+            (r'\d[\d_]*', Number.Integer),
+            # NOTE(review): this float rule sits after the integer rule, so
+            # a positive "1.5" is consumed as Integer first; the '.' is also
+            # unescaped. Confirm whether floats are intended to match here.
+            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+            (r"[\+\*\-/\^\.]", Operator),
+        ],
+        '_elpi-comment': [
+            (r'%[^\n]*\n', Comment),
+            (r'/\*', Comment, 'elpi-multiline-comment'),
+            (r"\s+", Text.Whitespace),
+        ],
+        'elpi-multiline-comment': [
+            (r'\*/', Comment, '#pop'),
+            (r'.', Comment)
+        ],
+        'elpi-indexing-expr':[
+            (r'[0-9 _]+', Number.Integer),
+            (r'\)', Text, '#pop'),
+        ],
+        'elpi-type': [
+            (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
+            (r'->', Keyword.Type),
+            (constant_re, Keyword.Type),
+            (r"\(|\)", Keyword.Type),
+            (r"\.", Text, '#pop'),
+            include('_elpi-comment'),
+        ],
+        'elpi-chr-rule-start': [
+            (r"\{", Text, 'elpi-chr-rule'),
+            include('_elpi-comment'),
+        ],
+        'elpi-chr-rule': [
+            (r"\brule\b", Keyword.Declaration),
+            (r"\\", Keyword.Declaration),
+            # Pops both this state and 'elpi-chr-rule-start'.
+            (r"\}", Text, '#pop:2'),
+            include('elpi'),
+        ],
+        'elpi-pred-item': [
+            # i: / o: mode annotations introduce an argument type.
+            (r"[io]:", Keyword.Mode, 'elpi-ctype'),
+            (r"\.", Text, '#pop'),
+            include('_elpi-comment'),
+        ],
+        'elpi-ctype': [
+            (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
+            (r'->', Keyword.Type),
+            (constant_re, Keyword.Type),
+            (r"\(|\)", Keyword.Type),
+            (r",", Text, '#pop'),
+            # '.' ends the whole pred item, popping back past 'elpi-pred-item'.
+            (r"\.", Text, '#pop:2'),
+            include('_elpi-comment'),
+        ],
+        'elpi-btick': [
+            (r'[^` ]+', String.Double),
+            (r'`', String.Double, '#pop'),
+        ],
+        'elpi-tick': [
+            (r'[^\' ]+', String.Double),
+            (r'\'', String.Double, '#pop'),
+        ],
+        'elpi-string': [
+            (r'[^\"]+', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+        'elpi-spill': [
+            (r'\{[^\{]', Text, '#push'),
+            (r'\}[^\}]', Text, '#pop'),
+            include('elpi'),
+        ],
+        'elpi-in-parens': [
+            (r"\(", Operator, '#push'),
+            (r"\)", Operator, '#pop'),
+            include('elpi'),
+        ],
+
+    }
diff --git a/pygments/lexers/email.py b/pygments/lexers/email.py
new file mode 100644
index 0000000..1de01ff
--- /dev/null
+++ b/pygments/lexers/email.py
@@ -0,0 +1,132 @@
+"""
+ pygments.lexers.email
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the raw E-mail.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, DelegatingLexer, bygroups
+from pygments.lexers.mime import MIMELexer
+from pygments.token import Text, Keyword, Name, String, Number, Comment
+from pygments.util import get_bool_opt
+
+__all__ = ["EmailLexer"]
+
+
+class EmailHeaderLexer(RegexLexer):
+    """
+    Sub-lexer for raw E-mail. This lexer only process header part of e-mail.
+
+    .. versionadded:: 2.5
+    """
+
+    def __init__(self, **options):
+        super().__init__(**options)
+        # When False (the default), X-* headers are rendered dimmed as
+        # comments instead of being fully highlighted.
+        self.highlight_x = get_bool_opt(options, "highlight-X-header", False)
+
+    def get_x_header_tokens(self, match):
+        # Callback for the X-header rule: group 1 is the field name,
+        # group 2 is the (possibly folded) field body.
+        if self.highlight_x:
+            # field
+            yield match.start(1), Name.Tag, match.group(1)
+
+            # content: re-lex the body with the normal header rules
+            default_actions = self.get_tokens_unprocessed(
+                match.group(2), stack=("root", "header"))
+            yield from default_actions
+        else:
+            # lowlight
+            yield match.start(1), Comment.Special, match.group(1)
+            yield match.start(2), Comment.Multiline, match.group(2)
+
+    tokens = {
+        "root": [
+            # Standard header fields (anything not starting with X-).
+            (r"^(?:[A-WYZ]|X400)[\w\-]*:", Name.Tag, "header"),
+            # User-defined X- fields; handled by the callback above.
+            (r"^(X-(?:\w[\w\-]*:))([\s\S]*?\n)(?![ \t])", get_x_header_tokens),
+        ],
+        "header": [
+            # folding: a header body continues on lines starting with WS
+            (r"\n[ \t]", Text.Whitespace),
+            (r"\n(?![ \t])", Text.Whitespace, "#pop"),
+
+            # keywords
+            (r"\bE?SMTPS?\b", Keyword),
+            (r"\b(?:HE|EH)LO\b", Keyword),
+
+            # mailbox
+            (r"[\w\.\-\+=]+@[\w\.\-]+", Name.Label),
+            (r"<[\w\.\-\+=]+@[\w\.\-]+>", Name.Label),
+
+            # domain
+            (r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function),
+
+            # IPv4
+            (r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0"
+             r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)",
+             Number.Integer),
+
+            # IPv6: the alternatives below cover the abbreviated '::' forms,
+            # zone indices (fe80...%...), and IPv4-mapped addresses.
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex),
+            (r"(?<=\b):((:[0-9a-fA-F]{1,4}){1,7}|:)(?=\b)", Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
+            (r"(?<=\b)[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})(?=\b)", Number.Hex),
+            (r"(?<=\b)fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}(?=\b)", Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}(?=\b)", Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}(?=\b)",
+             Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}(?=\b)",
+             Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)",
+             Number.Hex),
+            (r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}"
+             r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+             r"[0-9])(?=\b)",
+             Number.Hex),
+            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9])"
+             r"{0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])(?=\b)",
+             Number.Hex),
+
+            # Date time (RFC 5322-style date with optional weekday/zone)
+            (r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3["
+             r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+("
+             r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])"
+             r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-"
+             r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)",
+             Name.Decorator),
+
+            # RFC-2047 encoded string
+            (r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\"
+             r"\]^_`{|}~]+)(\?=)",
+             bygroups(String.Affix, Name.Constant, String.Affix, Keyword.Constant,
+                      String.Affix, Number.Hex, String.Affix)),
+
+            # others
+            (r'[\s]+', Text.Whitespace),
+            (r'[\S]', Text),
+        ],
+    }
+
+
+class EmailLexer(DelegatingLexer):
+    """
+    Lexer for raw E-mail.
+
+    Additional options accepted:
+
+    `highlight-X-header`
+        Highlight the fields of ``X-`` user-defined email header. (default:
+        ``False``).
+
+    .. versionadded:: 2.5
+    """
+
+    name = "E-mail"
+    aliases = ["email", "eml"]
+    filenames = ["*.eml"]
+    mimetypes = ["message/rfc822"]
+
+    def __init__(self, **options):
+        # Headers go to EmailHeaderLexer; everything it leaves as Comment
+        # (i.e. the body) is delegated to MIMELexer.
+        super().__init__(EmailHeaderLexer, MIMELexer, Comment, **options)
diff --git a/pygments/lexers/erlang.py b/pygments/lexers/erlang.py
new file mode 100644
index 0000000..c14cbe5
--- /dev/null
+++ b/pygments/lexers/erlang.py
@@ -0,0 +1,528 @@
+"""
+ pygments.lexers.erlang
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Erlang.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
+ include, default, line_re
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
+ 'ElixirLexer']
+
+
+class ErlangLexer(RegexLexer):
+    """
+    For the Erlang functional programming language.
+
+    .. versionadded:: 0.9
+    """
+
+    name = 'Erlang'
+    url = 'https://www.erlang.org/'
+    aliases = ['erlang']
+    filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
+    mimetypes = ['text/x-erlang']
+
+    keywords = (
+        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
+        'let', 'of', 'query', 'receive', 'try', 'when',
+    )
+
+    builtins = (  # See erlang(3) man page
+        'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
+        'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
+        'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
+        'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
+        'float', 'float_to_list', 'fun_info', 'fun_to_list',
+        'function_exported', 'garbage_collect', 'get', 'get_keys',
+        'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
+        'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
+        'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
+        'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
+        'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
+        'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
+        'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
+        'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
+        'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
+        'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
+        'pid_to_list', 'port_close', 'port_command', 'port_connect',
+        'port_control', 'port_call', 'port_info', 'port_to_list',
+        'process_display', 'process_flag', 'process_info', 'purge_module',
+        'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
+        'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
+        'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
+        'spawn_opt', 'split_binary', 'start_timer', 'statistics',
+        'suspend_process', 'system_flag', 'system_info', 'system_monitor',
+        'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
+        'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
+        'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
+    )
+
+    operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
+    word_operators = (
+        'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
+        'div', 'not', 'or', 'orelse', 'rem', 'xor'
+    )
+
+    # Atoms: either lowercase-initial words or single-quoted.
+    atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
+
+    variable_re = r'(?:[A-Z_]\w*)'
+
+    esc_char_re = r'[bdefnrstv\'"\\]'
+    esc_octal_re = r'[0-7][0-7]?[0-7]?'
+    esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
+    esc_ctrl_re = r'\^[a-zA-Z]'
+    escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
+
+    macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
+
+    # Integer bases 2..36 for the Base#Value literal form.
+    base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
+
+    tokens = {
+        'root': [
+            (r'\s+', Whitespace),
+            (r'(%.*)(\n)', bygroups(Comment, Whitespace)),
+            (words(keywords, suffix=r'\b'), Keyword),
+            (words(builtins, suffix=r'\b'), Name.Builtin),
+            (words(word_operators, suffix=r'\b'), Operator.Word),
+            (r'^-', Punctuation, 'directive'),
+            (operators, Operator),
+            (r'"', String, 'string'),
+            (r'<<', Name.Label),
+            (r'>>', Name.Label),
+            ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
+            ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
+             bygroups(Name.Function, Whitespace, Punctuation)),
+            (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
+            (r'[+-]?\d+', Number.Integer),
+            # NOTE(review): '.' is unescaped here, and the integer rule above
+            # matches first, so "1.5" lexes as Integer, '.', Integer rather
+            # than as a Float — confirm whether this is intended.
+            (r'[+-]?\d+.\d+', Number.Float),
+            (r'[]\[:_@\".{}()|;,]', Punctuation),
+            (variable_re, Name.Variable),
+            (atom_re, Name),
+            # ?MACRO references, $char literals, #record accesses.
+            (r'\?'+macro_re, Name.Constant),
+            (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
+            (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
+
+            # Erlang script shebang
+            (r'\A#!.+\n', Comment.Hashbang),
+
+            # EEP 43: Maps
+            # http://www.erlang.org/eeps/eep-0043.html
+            (r'#\{', Punctuation, 'map_key'),
+        ],
+        'string': [
+            (escape_re, String.Escape),
+            (r'"', String, '#pop'),
+            # io:format-style control sequences inside strings.
+            (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
+            (r'[^"\\~]+', String),
+            (r'~', String),
+        ],
+        'directive': [
+            (r'(define)(\s*)(\()('+macro_re+r')',
+             bygroups(Name.Entity, Whitespace, Punctuation, Name.Constant), '#pop'),
+            (r'(record)(\s*)(\()('+macro_re+r')',
+             bygroups(Name.Entity, Whitespace, Punctuation, Name.Label), '#pop'),
+            (atom_re, Name.Entity, '#pop'),
+        ],
+        'map_key': [
+            include('root'),
+            (r'=>', Punctuation, 'map_val'),
+            (r':=', Punctuation, 'map_val'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'map_val': [
+            include('root'),
+            (r',', Punctuation, '#pop'),
+            (r'(?=\})', Punctuation, '#pop'),
+        ],
+    }
+
+
+class ErlangShellLexer(Lexer):
+    """
+    Shell sessions in erl (for Erlang code).
+
+    .. versionadded:: 1.1
+    """
+    name = 'Erlang erl session'
+    aliases = ['erl']
+    filenames = ['*.erl-sh']
+    mimetypes = ['text/x-erl-shellsession']
+
+    # erl prompts look like "1>" or "(node@host)1>".
+    _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')
+
+    def get_tokens_unprocessed(self, text):
+        # Prompted lines are accumulated and delegated to ErlangLexer;
+        # lines starting with '*' are error reports, the rest is output.
+        erlexer = ErlangLexer(**self.options)
+
+        curcode = ''
+        insertions = []
+        for match in line_re.finditer(text):
+            line = match.group()
+            m = self._prompt_re.match(line)
+            if m is not None:
+                end = m.end()
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, line[:end])]))
+                curcode += line[end:]
+            else:
+                if curcode:
+                    yield from do_insertions(insertions,
+                                             erlexer.get_tokens_unprocessed(curcode))
+                    curcode = ''
+                    insertions = []
+                if line.startswith('*'):
+                    yield match.start(), Generic.Traceback, line
+                else:
+                    yield match.start(), Generic.Output, line
+        # Flush trailing code not followed by an output line.
+        if curcode:
+            yield from do_insertions(insertions,
+                                     erlexer.get_tokens_unprocessed(curcode))
+
+
+def gen_elixir_string_rules(name, symbol, token):
+    """Build a 'string_<name>' state for a string delimited by *symbol*,
+    with escape and #{...} interpolation support, emitting *token*."""
+    states = {}
+    states['string_' + name] = [
+        (r'[^#%s\\]+' % (symbol,), token),
+        include('escapes'),
+        (r'\\.', token),
+        # Closing delimiter pops back to the enclosing state.
+        (r'(%s)' % (symbol,), bygroups(token), "#pop"),
+        include('interpol')
+    ]
+    return states
+
+
+def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
+    """Return the rule list for a sigil string closed by *term* (with
+    character-class form *term_class*); *interpol* controls whether
+    escapes and #{...} interpolation are recognized inside it."""
+    if interpol:
+        return [
+            (r'[^#%s\\]+' % (term_class,), token),
+            include('escapes'),
+            (r'\\.', token),
+            # Terminator may be followed by sigil modifier letters.
+            (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
+            include('interpol')
+        ]
+    else:
+        return [
+            (r'[^%s\\]+' % (term_class,), token),
+            (r'\\.', token),
+            (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
+        ]
+
+
+class ElixirLexer(RegexLexer):
+    """
+    For the Elixir language.
+
+    .. versionadded:: 1.5
+    """
+
+    name = 'Elixir'
+    url = 'http://elixir-lang.org'
+    aliases = ['elixir', 'ex', 'exs']
+    filenames = ['*.ex', '*.eex', '*.exs', '*.leex']
+    mimetypes = ['text/x-elixir']
+
+    # Word lists used by get_tokens_unprocessed to reclassify Name tokens.
+    KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
+    KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
+    BUILTIN = (
+        'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
+        'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
+    )
+    BUILTIN_DECLARATION = (
+        'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
+        'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
+    )
+
+    BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
+    CONSTANT = ('nil', 'true', 'false')
+
+    PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')
+
+    # Operators grouped by length so longer ones are tried first.
+    OPERATORS3 = (
+        '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
+        '~>>', '<~>', '|~>', '<|>',
+    )
+    OPERATORS2 = (
+        '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
+        '->', '<-', '|', '.', '=', '~>', '<~',
+    )
+    OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
+
+    PUNCTUATION = (
+        '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
+    )
+
+    def get_tokens_unprocessed(self, text):
+        # Post-process plain Name tokens into keywords/builtins/constants
+        # based on the word lists above.
+        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
+            if token is Name:
+                if value in self.KEYWORD:
+                    yield index, Keyword, value
+                elif value in self.KEYWORD_OPERATOR:
+                    yield index, Operator.Word, value
+                elif value in self.BUILTIN:
+                    yield index, Keyword, value
+                elif value in self.BUILTIN_DECLARATION:
+                    yield index, Keyword.Declaration, value
+                elif value in self.BUILTIN_NAMESPACE:
+                    yield index, Keyword.Namespace, value
+                elif value in self.CONSTANT:
+                    yield index, Name.Constant, value
+                elif value in self.PSEUDO_VAR:
+                    yield index, Name.Builtin.Pseudo, value
+                else:
+                    yield index, token, value
+            else:
+                yield index, token, value
+
+    def gen_elixir_sigil_rules():
+        # Executed at class-definition time to generate the sigil states
+        # merged into `tokens` via tokens.update() below.
+        # all valid sigil terminators (excluding heredocs)
+        terminators = [
+            (r'\{', r'\}', '}', 'cb'),
+            (r'\[', r'\]', r'\]', 'sb'),
+            (r'\(', r'\)', ')', 'pa'),
+            ('<', '>', '>', 'ab'),
+            ('/', '/', '/', 'slas'),
+            (r'\|', r'\|', '|', 'pipe'),
+            ('"', '"', '"', 'quot'),
+            ("'", "'", "'", 'apos'),
+        ]
+
+        # heredocs have slightly different rules
+        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]
+
+        token = String.Other
+        states = {'sigils': []}
+
+        for term, name in triquotes:
+            # Lowercase sigils interpolate; uppercase ones do not.
+            states['sigils'] += [
+                (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
+                 (name + '-end', name + '-intp')),
+                (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
+                 (name + '-end', name + '-no-intp')),
+            ]
+
+            states[name + '-end'] = [
+                (r'[a-zA-Z]+', token, '#pop'),
+                default('#pop'),
+            ]
+            states[name + '-intp'] = [
+                (r'^(\s*)(' + term + ')', bygroups(Whitespace, String.Heredoc), '#pop'),
+                include('heredoc_interpol'),
+            ]
+            states[name + '-no-intp'] = [
+                (r'^(\s*)(' + term +')', bygroups(Whitespace, String.Heredoc), '#pop'),
+                include('heredoc_no_interpol'),
+            ]
+
+        for lterm, rterm, rterm_class, name in terminators:
+            states['sigils'] += [
+                (r'~[a-z]' + lterm, token, name + '-intp'),
+                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
+            ]
+            states[name + '-intp'] = \
+                gen_elixir_sigstr_rules(rterm, rterm_class, token)
+            states[name + '-no-intp'] = \
+                gen_elixir_sigstr_rules(rterm, rterm_class, token, interpol=False)
+
+        return states
+
+    op3_re = "|".join(re.escape(s) for s in OPERATORS3)
+    op2_re = "|".join(re.escape(s) for s in OPERATORS2)
+    op1_re = "|".join(re.escape(s) for s in OPERATORS1)
+    ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
+    punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
+    alnum = r'\w'
+    name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
+    modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
+    complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
+    special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'
+
+    long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
+    hex_char_re = r'(\\x[\da-fA-F]{1,2})'
+    escape_char_re = r'(\\[abdefnrstv])'
+
+    tokens = {
+        'root': [
+            (r'\s+', Whitespace),
+            (r'#.*$', Comment.Single),
+
+            # Various kinds of characters (?c literals)
+            (r'(\?)' + long_hex_char_re,
+             bygroups(String.Char,
+                      String.Escape, Number.Hex, String.Escape)),
+            (r'(\?)' + hex_char_re,
+             bygroups(String.Char, String.Escape)),
+            (r'(\?)' + escape_char_re,
+             bygroups(String.Char, String.Escape)),
+            (r'\?\\?.', String.Char),
+
+            # '::' has to go before atoms
+            (r':::', String.Symbol),
+            (r'::', Operator),
+
+            # atoms
+            (r':' + special_atom_re, String.Symbol),
+            (r':' + complex_name_re, String.Symbol),
+            (r':"', String.Symbol, 'string_double_atom'),
+            (r":'", String.Symbol, 'string_single_atom'),
+
+            # [keywords: ...]
+            (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
+             bygroups(String.Symbol, Punctuation)),
+
+            # @attributes
+            (r'@' + name_re, Name.Attribute),
+
+            # identifiers
+            (name_re, Name),
+            (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),
+
+            # operators and punctuation
+            (op3_re, Operator),
+            (op2_re, Operator),
+            (punctuation_re, Punctuation),
+            (r'&\d', Name.Entity),   # anon func arguments
+            (op1_re, Operator),
+
+            # numbers
+            (r'0b[01]+', Number.Bin),
+            (r'0o[0-7]+', Number.Oct),
+            (r'0x[\da-fA-F]+', Number.Hex),
+            (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
+            (r'\d(_?\d)*', Number.Integer),
+
+            # strings and heredocs
+            (r'(""")(\s*)', bygroups(String.Heredoc, Whitespace),
+             'heredoc_double'),
+            (r"(''')(\s*)$", bygroups(String.Heredoc, Whitespace),
+             'heredoc_single'),
+            (r'"', String.Double, 'string_double'),
+            (r"'", String.Single, 'string_single'),
+
+            include('sigils'),
+
+            (r'%\{', Punctuation, 'map_key'),
+            (r'\{', Punctuation, 'tuple'),
+        ],
+        'heredoc_double': [
+            (r'^(\s*)(""")', bygroups(Whitespace, String.Heredoc), '#pop'),
+            include('heredoc_interpol'),
+        ],
+        'heredoc_single': [
+            (r"^\s*'''", String.Heredoc, '#pop'),
+            include('heredoc_interpol'),
+        ],
+        'heredoc_interpol': [
+            (r'[^#\\\n]+', String.Heredoc),
+            include('escapes'),
+            (r'\\.', String.Heredoc),
+            (r'\n+', String.Heredoc),
+            include('interpol'),
+        ],
+        'heredoc_no_interpol': [
+            (r'[^\\\n]+', String.Heredoc),
+            (r'\\.', String.Heredoc),
+            (r'\n+', Whitespace),
+        ],
+        'escapes': [
+            (long_hex_char_re,
+             bygroups(String.Escape, Number.Hex, String.Escape)),
+            (hex_char_re, String.Escape),
+            (escape_char_re, String.Escape),
+        ],
+        'interpol': [
+            # #{...} interpolation re-enters the root state.
+            (r'#\{', String.Interpol, 'interpol_string'),
+        ],
+        'interpol_string': [
+            (r'\}', String.Interpol, "#pop"),
+            include('root')
+        ],
+        'map_key': [
+            include('root'),
+            (r':', Punctuation, 'map_val'),
+            (r'=>', Punctuation, 'map_val'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'map_val': [
+            include('root'),
+            (r',', Punctuation, '#pop'),
+            (r'(?=\})', Punctuation, '#pop'),
+        ],
+        'tuple': [
+            include('root'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+    }
+    # Merge in the generated string, atom-string, and sigil states.
+    tokens.update(gen_elixir_string_rules('double', '"', String.Double))
+    tokens.update(gen_elixir_string_rules('single', "'", String.Single))
+    tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
+    tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
+    tokens.update(gen_elixir_sigil_rules())
+
+
+class ElixirConsoleLexer(Lexer):
+    """
+    For Elixir interactive console (iex) output like:
+
+    .. sourcecode:: iex
+
+        iex> [head | tail] = [1,2,3]
+        [1,2,3]
+        iex> head
+        1
+        iex> tail
+        [2,3]
+        iex> [head | tail]
+        [1,2,3]
+        iex> length [head | tail]
+        3
+
+    .. versionadded:: 1.5
+    """
+
+    name = 'Elixir iex session'
+    aliases = ['iex']
+    mimetypes = ['text/x-elixir-shellsession']
+
+    # Matches "iex>", "iex(1)>", "iex(node@host)1>" and "...>" continuations.
+    _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')
+
+    def get_tokens_unprocessed(self, text):
+        exlexer = ElixirLexer(**self.options)
+
+        curcode = ''
+        # Once a "** " error line is seen, subsequent non-prompt lines are
+        # treated as part of the error until the next prompt.
+        in_error = False
+        insertions = []
+        for match in line_re.finditer(text):
+            line = match.group()
+            if line.startswith('** '):
+                in_error = True
+                insertions.append((len(curcode),
+                                   [(0, Generic.Error, line[:-1])]))
+                curcode += line[-1:]
+            else:
+                m = self._prompt_re.match(line)
+                if m is not None:
+                    in_error = False
+                    end = m.end()
+                    insertions.append((len(curcode),
+                                       [(0, Generic.Prompt, line[:end])]))
+                    curcode += line[end:]
+                else:
+                    if curcode:
+                        yield from do_insertions(
+                            insertions, exlexer.get_tokens_unprocessed(curcode))
+                        curcode = ''
+                        insertions = []
+                    token = Generic.Error if in_error else Generic.Output
+                    yield match.start(), token, line
+        # Flush trailing code not followed by an output line.
+        if curcode:
+            yield from do_insertions(
+                insertions, exlexer.get_tokens_unprocessed(curcode))
diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py
new file mode 100644
index 0000000..aa09463
--- /dev/null
+++ b/pygments/lexers/esoteric.py
@@ -0,0 +1,301 @@
+"""
+ pygments.lexers.esoteric
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for esoteric languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words, bygroups
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
+ Punctuation, Error, Whitespace
+
+__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer',
+ 'CapDLLexer', 'AheuiLexer']
+
+
class BrainfuckLexer(RegexLexer):
    """
    Lexer for the esoteric BrainFuck language.
    """

    name = 'Brainfuck'
    url = 'http://www.muppetlabs.com/~breadbox/bf/'
    aliases = ['brainfuck', 'bf']
    filenames = ['*.bf', '*.b']
    mimetypes = ['application/x-brainfuck']

    tokens = {
        'common': [
            # use different colors for different instruction types
            (r'[.,]+', Name.Tag),
            (r'[+-]+', Name.Builtin),
            (r'[<>]+', Name.Variable),
            (r'[^.,+\-<>\[\]]+', Comment),
        ],
        'root': [
            (r'\[', Keyword, 'loop'),
            (r'\]', Error),
            include('common'),
        ],
        'loop': [
            (r'\[', Keyword, '#push'),
            (r'\]', Keyword, '#pop'),
            include('common'),
        ]
    }

    def analyse_text(text):
        """It's safe to assume that a program which mostly consists of + -
        and < > is brainfuck."""
        # The sampling window is at least 256 characters and at least the
        # whole text, so the slice below always covers all of ``text``.
        window = max(256, len(text))
        sample = text[:window]

        arith_ops = sample.count('+') + sample.count('-')
        move_ops = sample.count('<') + sample.count('>')

        # Mostly arithmetic or mostly pointer movement: clearly brainfuck.
        threshold = 0.25 * window
        if arith_ops > threshold or move_ops > threshold:
            return 1.0

        # '[-]' (the common "zero the current cell" idiom) is weaker evidence.
        return 0.5 if '[-]' in text else 0
+
+
class BefungeLexer(RegexLexer):
    """
    Lexer for the esoteric Befunge language.

    Befunge programs are grids of mostly single-character instructions,
    so each rule below classifies one character class.  Rules are tried
    in order, so earlier rules take precedence.

    .. versionadded:: 0.7
    """
    name = 'Befunge'
    url = 'http://en.wikipedia.org/wiki/Befunge'
    aliases = ['befunge']
    filenames = ['*.befunge']
    mimetypes = ['application/x-befunge']

    tokens = {
        'root': [
            (r'[0-9a-f]', Number),
            (r'[+*/%!`-]', Operator),             # Traditional math
            (r'[<>^v?\[\]rxjk]', Name.Variable),  # Move, imperatives
            (r'[:\\$.,n]', Name.Builtin),         # Stack ops, imperatives
            (r'[|_mw]', Keyword),
            (r'[{}]', Name.Tag),                  # Befunge-98 stack ops
            (r'".*?"', String.Double),  # Strings don't appear to allow escapes
            (r'\'.', String.Single),              # Single character
            (r'[#;]', Comment),       # Trampoline... depends on direction hit
            (r'[pg&~=@iotsy]', Keyword),          # Misc
            (r'[()A-Z]', Comment),                # Fingerprints
            (r'\s+', Whitespace),                 # Whitespace doesn't matter
        ],
    }
+
+
class CAmkESLexer(RegexLexer):
    """
    Basic lexer for the input language for the CAmkES component platform.

    Rules are tried in order (RegexLexer semantics), which is why the
    float literal rule below must precede the plain integer rule.

    .. versionadded:: 2.1
    """
    name = 'CAmkES'
    url = 'https://sel4.systems/CAmkES/'
    aliases = ['camkes', 'idl4']
    filenames = ['*.camkes', '*.idl4']

    tokens = {
        'root': [
            # C pre-processor directive
            (r'^(\s*)(#.*)(\n)', bygroups(Whitespace, Comment.Preproc,
                                          Whitespace)),

            # Whitespace, comments
            (r'\s+', Whitespace),
            (r'/\*(.|\n)*?\*/', Comment),
            (r'//.*$', Comment),

            (r'[\[(){},.;\]]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]', Operator),

            (words(('assembly', 'attribute', 'component', 'composition',
                    'configuration', 'connection', 'connector', 'consumes',
                    'control', 'dataport', 'Dataport', 'Dataports', 'emits',
                    'event', 'Event', 'Events', 'export', 'from', 'group',
                    'hardware', 'has', 'interface', 'Interface', 'maybe',
                    'procedure', 'Procedure', 'Procedures', 'provides',
                    'template', 'thread', 'threads', 'to', 'uses', 'with'),
                   suffix=r'\b'), Keyword),

            # Fixed: 'int16_6' was a typo for the C99 <stdint.h> type name
            # 'int16_t' (cf. the other intN_t entries in this list).
            (words(('bool', 'boolean', 'Buf', 'char', 'character', 'double',
                    'float', 'in', 'inout', 'int', 'int16_t', 'int32_t',
                    'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real',
                    'refin', 'semaphore', 'signed', 'string', 'struct',
                    'uint16_t', 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t',
                    'unsigned', 'void'),
                   suffix=r'\b'), Keyword.Type),

            # Recognised attributes
            (r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved),
            (words(('dma_pool', 'from_access', 'to_access'), suffix=r'\b'),
             Keyword.Reserved),

            # CAmkES-level include
            (r'(import)(\s+)((?:<[^>]*>|"[^"]*");)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc)),

            # C-level include
            (r'(include)(\s+)((?:<[^>]*>|"[^"]*");)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc)),

            # Literals.  The float rule must come before the integer rule:
            # with the integer rule first, "1.5" would lex as Number,
            # Punctuation, Number and the float rule would be unreachable.
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'-?[\d]+\.[\d]+', Number.Float),
            (r'-?[\d]+', Number),
            (r'"[^"]*"', String),
            (r'[Tt]rue|[Ff]alse', Name.Builtin),

            # Identifiers
            (r'[a-zA-Z_]\w*', Name),
        ],
    }
+
+
class CapDLLexer(RegexLexer):
    """
    Basic lexer for CapDL.

    The source of the primary tool that reads such specifications is available
    at https://github.com/seL4/capdl/tree/master/capDL-tool. Note that this
    lexer only supports a subset of the grammar. For example, identifiers can
    shadow type names, but these instances are currently incorrectly
    highlighted as types. Supporting this would need a stateful lexer that is
    considered unnecessarily complex for now.

    .. versionadded:: 2.2
    """
    name = 'CapDL'
    url = 'https://ssrg.nicta.com.au/publications/nictaabstracts/Kuz_KLW_10.abstract.pml'
    aliases = ['capdl']
    filenames = ['*.cdl']

    tokens = {
        'root': [
            # C pre-processor directive
            (r'^(\s*)(#.*)(\n)',
             bygroups(Whitespace, Comment.Preproc, Whitespace)),

            # Whitespace, comments (both C++-style "//" and
            # Haskell/Isabelle-style "--" line comments)
            (r'\s+', Whitespace),
            (r'/\*(.|\n)*?\*/', Comment),
            (r'(//|--).*$', Comment),

            (r'[<>\[(){},:;=\]]', Punctuation),
            (r'\.\.', Punctuation),

            # Section/structure keywords
            (words(('arch', 'arm11', 'caps', 'child_of', 'ia32', 'irq', 'maps',
                    'objects'), suffix=r'\b'), Keyword),

            # Kernel object types
            (words(('aep', 'asid_pool', 'cnode', 'ep', 'frame', 'io_device',
                    'io_ports', 'io_pt', 'notification', 'pd', 'pt', 'tcb',
                    'ut', 'vcpu'), suffix=r'\b'), Keyword.Type),

            # Properties
            (words(('asid', 'addr', 'badge', 'cached', 'dom', 'domainID', 'elf',
                    'fault_ep', 'G', 'guard', 'guard_size', 'init', 'ip',
                    'prio', 'sp', 'R', 'RG', 'RX', 'RW', 'RWG', 'RWX', 'W',
                    'WG', 'WX', 'level', 'masked', 'master_reply', 'paddr',
                    'ports', 'reply', 'uncached'), suffix=r'\b'),
             Keyword.Reserved),

            # Literals (sizes may carry a k/M suffix)
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'\d+(\.\d+)?(k|M)?', Number),
            (words(('bits',), suffix=r'\b'), Number),
            # Symbolic slot names, highlighted like numeric slot indices
            (words(('cspace', 'vspace', 'reply_slot', 'caller_slot',
                    'ipc_buffer_slot'), suffix=r'\b'), Number),

            # Identifiers (may contain '-', '@' and '.')
            (r'[a-zA-Z_][-@\.\w]*', Name),
        ],
    }
+
+
class RedcodeLexer(RegexLexer):
    """
    A simple Redcode lexer based on ICWS'94.
    Contributed by Adam Blinkinsop <blinks@acm.org>.

    .. versionadded:: 0.8
    """
    name = 'Redcode'
    aliases = ['redcode']
    filenames = ['*.cw']

    # Opcode mnemonics and addressing modifiers; joined into alternations
    # in the rules below.
    opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD',
               'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL',
               'ORG', 'EQU', 'END')
    modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I')

    tokens = {
        'root': [
            # Whitespace:
            (r'\s+', Whitespace),
            (r';.*$', Comment.Single),
            # Lexemes:
            #  Identifiers (opcodes and modifiers first, so they win)
            (r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
            (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
            # Fixed: was [A-Za-z_]\w+ which required at least two
            # characters, so single-character labels fell through and
            # were emitted as error tokens.
            (r'[A-Za-z_]\w*', Name),
            #  Operators
            (r'[-+*/%]', Operator),
            (r'[#$@<>]', Operator),  # mode
            (r'[.,]', Punctuation),  # mode
            #  Numbers
            (r'[-+]?\d+', Number.Integer),
        ],
    }
+
+
class AheuiLexer(RegexLexer):
    """
    Aheui is esoteric language based on Korean alphabets.
    """

    name = 'Aheui'
    url = 'http://aheui.github.io/'
    aliases = ['aheui']
    filenames = ['*.aheui']

    tokens = {
        'root': [
            # These Hangul syllable ranges are highlighted as operators;
            # every other character falls through to the catch-all rule
            # below and is treated as a comment.
            ('['
             '나-낳냐-냫너-넣녀-녛노-놓뇨-눟뉴-닇'
             '다-닿댜-댷더-덯뎌-뎧도-돟됴-둫듀-딓'
             '따-땋땨-떃떠-떻뗘-뗳또-똫뚀-뚷뜌-띟'
             '라-랗랴-럏러-렇려-렿로-롷료-뤃류-릫'
             '마-맣먀-먛머-멓며-몋모-뫃묘-뭏뮤-믷'
             '바-밯뱌-뱧버-벟벼-볗보-봏뵤-붛뷰-빃'
             '빠-빻뺘-뺳뻐-뻫뼈-뼣뽀-뽛뾰-뿧쀼-삏'
             '사-샇샤-샿서-섷셔-셯소-솧쇼-숳슈-싛'
             '싸-쌓쌰-썋써-쎃쎠-쎻쏘-쏳쑈-쑿쓔-씧'
             '자-잫쟈-쟣저-젛져-졓조-좋죠-줗쥬-즿'
             '차-챃챠-챻처-첳쳐-쳫초-촣쵸-춯츄-칗'
             '카-캏캬-컇커-컿켜-켷코-콯쿄-쿻큐-킣'
             '타-탛탸-턓터-텋텨-톃토-톻툐-퉇튜-틯'
             '파-팧퍄-퍟퍼-펗펴-폏포-퐇표-풓퓨-픻'
             '하-핳햐-햫허-헣혀-혛호-홓효-훟휴-힇'
             ']', Operator),
            # NOTE(review): '.' does not match newlines, so newlines are
            # emitted as error tokens by the RegexLexer fallback — confirm
            # whether a whitespace rule was intended here.
            ('.', Comment),
        ],
    }
diff --git a/pygments/lexers/ezhil.py b/pygments/lexers/ezhil.py
new file mode 100644
index 0000000..caa52d5
--- /dev/null
+++ b/pygments/lexers/ezhil.py
@@ -0,0 +1,77 @@
+"""
+ pygments.lexers.ezhil
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Pygments lexers for Ezhil language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Keyword, Comment, Name, String, Number, \
+ Punctuation, Operator, Whitespace
+
+__all__ = ['EzhilLexer']
+
+
class EzhilLexer(RegexLexer):
    """
    Lexer for Ezhil, a Tamil script-based programming language.

    .. versionadded:: 2.1
    """
    name = 'Ezhil'
    url = 'http://ezhillang.org'
    aliases = ['ezhil']
    filenames = ['*.n']
    mimetypes = ['text/x-ezhil']
    # Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this.
    # This much simpler version is close enough, and includes combining marks.
    _TALETTERS = '[a-zA-Z_]|[\u0b80-\u0bff]'
    tokens = {
        'root': [
            # Keywords first, so they win over the identifier rule.
            include('keywords'),
            (r'#.*$', Comment.Single),
            (r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator),
            ('இல்', Operator.Word),
            # Builtin functions: English math/IO names plus Tamil-script
            # string/list/file helpers.
            (words(('assert', 'max', 'min',
                    'நீளம்', 'சரம்_இடமாற்று', 'சரம்_கண்டுபிடி',
                    'பட்டியல்', 'பின்இணை', 'வரிசைப்படுத்து',
                    'எடு', 'தலைகீழ்', 'நீட்டிக்க', 'நுழைக்க', 'வை',
                    'கோப்பை_திற', 'கோப்பை_எழுது', 'கோப்பை_மூடு',
                    'pi', 'sin', 'cos', 'tan', 'sqrt', 'hypot', 'pow',
                    'exp', 'log', 'log10', 'exit',
                    ), suffix=r'\b'), Name.Builtin),
            (r'(True|False)\b', Keyword.Constant),
            (r'[^\S\n]+', Whitespace),
            include('identifier'),
            include('literal'),
            (r'[(){}\[\]:;.]', Punctuation),
        ],
        'keywords': [
            # NOTE(review): 'நிரல்பாகம்' appears twice in this alternation;
            # redundant but harmless.
            ('பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து|நிரல்பாகம்', Keyword),
        ],
        'identifier': [
            # A Tamil or ASCII letter followed by letters/digits.
            ('(?:'+_TALETTERS+')(?:[0-9]|'+_TALETTERS+')*', Name),
        ],
        'literal': [
            (r'".*?"', String),
            # Float rule precedes the integer rule so "1.5" is one token.
            (r'\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float),
            (r'\d+', Number.Integer),
        ]
    }

    def analyse_text(text):
        """This language uses Tamil-script. We'll assume that if there's a
        decent amount of Tamil-characters, it's this language. This assumption
        is obviously horribly off if someone uses string literals in tamil
        in another language."""
        if len(re.findall(r'[\u0b80-\u0bff]', text)) > 10:
            return 0.25
        # Implicitly returns None otherwise — presumably treated as
        # "no confidence" by the caller; TODO confirm.

    def __init__(self, **options):
        super().__init__(**options)
        # Default to UTF-8 so Tamil source decodes correctly unless the
        # caller overrides the encoding.
        self.encoding = options.get('encoding', 'utf-8')
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
new file mode 100644
index 0000000..db10652
--- /dev/null
+++ b/pygments/lexers/factor.py
@@ -0,0 +1,364 @@
+"""
+ pygments.lexers.factor
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Factor language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, words
+from pygments.token import Text, Comment, Keyword, Name, String, Number, \
+ Whitespace, Punctuation
+
+__all__ = ['FactorLexer']
+
+
class FactorLexer(RegexLexer):
    """
    Lexer for the Factor language.

    The ``builtin_*`` word lists below group Factor's core vocabulary by
    the vocabulary it comes from.  Each list is compiled with
    ``suffix=r'(\\s+)'`` so every match captures the word's trailing
    whitespace; the corresponding rules then emit it separately via
    ``bygroups(Name.Builtin, Whitespace)``.

    .. versionadded:: 1.4
    """
    name = 'Factor'
    url = 'http://factorcode.org'
    aliases = ['factor']
    filenames = ['*.factor']
    mimetypes = ['text/x-factor']

    # kernel vocabulary: stack shuffling, combinators, object basics
    builtin_kernel = words((
        '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
        '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
        '3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
        '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
        'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
        'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
        'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
        'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
        'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
        'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
        'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
        'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
        'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
        'wrapper', 'wrapper?', 'xor'), suffix=r'(\s+)')

    # assocs vocabulary: associative-mapping words
    builtin_assocs = words((
        '2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
        'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
        'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
        'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
        'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
        'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
        'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
        'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
        'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
        'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
        'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
        'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'(\s+)')

    # combinators vocabulary: cleave/spread/case families
    builtin_combinators = words((
        '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
        '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
        'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
        'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
        'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
        'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'(\s+)')

    # math vocabulary
    builtin_math = words((
        '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
        '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
        '(each-integer)', '(find-integer)', '*', '+', '?1+',
        'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
        'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
        'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
        'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
        'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
        'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
        'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
        'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
        'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
        'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
        'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
        'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
        'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
        'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
        'zero?'), suffix=r'(\s+)')

    # sequences vocabulary
    builtin_sequences = words((
        '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
        '2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
        '3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
        '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
        'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
        'assert-sequence', 'assert-sequence=', 'assert-sequence?',
        'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
        'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
        'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
        'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
        'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
        'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
        'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
        'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
        'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
        'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
        'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
        'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
        'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
        'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
        'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
        'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
        'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
        'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
        'non-negative-integer-expected', 'non-negative-integer-expected?',
        'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
        'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
        'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
        'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
        'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
        'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
        'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
        'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
        'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
        'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
        'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
        'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
        'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
        'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
        'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
        'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
        'when-empty'), suffix=r'(\s+)')

    # namespaces vocabulary: dynamic variables
    builtin_namespaces = words((
        '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
        'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
        'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
        'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
        suffix=r'(\s+)')

    # arrays vocabulary
    builtin_arrays = words((
        '1array', '2array', '3array', '4array', '<array>', '>array', 'array',
        'array?', 'pair', 'pair?', 'resize-array'), suffix=r'(\s+)')

    # io vocabulary: streams and readers/writers
    builtin_io = words((
        '(each-stream-block-slice)', '(each-stream-block)',
        '(stream-contents-by-block)', '(stream-contents-by-element)',
        '(stream-contents-by-length-or-block)',
        '(stream-contents-by-length)', '+byte+', '+character+',
        'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
        'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
        'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
        'error-stream', 'flush', 'input-stream', 'input-stream?',
        'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
        'output-stream', 'output-stream?', 'print', 'read', 'read-into',
        'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
        'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
        'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
        'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
        'stream-copy*', 'stream-element-type', 'stream-flush',
        'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
        'stream-read', 'stream-read-into', 'stream-read-partial',
        'stream-read-partial-into', 'stream-read-partial-unsafe',
        'stream-read-unsafe', 'stream-read-until', 'stream-read1',
        'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
        'stream-write', 'stream-write1', 'tell-input', 'tell-output',
        'with-error-stream', 'with-error-stream*', 'with-error>output',
        'with-input-output+error-streams',
        'with-input-output+error-streams*', 'with-input-stream',
        'with-input-stream*', 'with-output-stream', 'with-output-stream*',
        'with-output>error', 'with-output+error-stream',
        'with-output+error-stream*', 'with-streams', 'with-streams*',
        'write', 'write1'), suffix=r'(\s+)')

    # strings vocabulary
    builtin_strings = words((
        '1string', '<string>', '>string', 'resize-string', 'string',
        'string?'), suffix=r'(\s+)')

    # vectors vocabulary
    builtin_vectors = words((
        '1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
        suffix=r'(\s+)')

    # continuations vocabulary: error handling and control flow
    builtin_continuations = words((
        '<condition>', '<continuation>', '<restart>', 'attempt-all',
        'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
        'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
        'condition?', 'continuation', 'continuation?', 'continue',
        'continue-restart', 'continue-with', 'current-continuation',
        'error', 'error-continuation', 'error-in-thread', 'error-thread',
        'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
        'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
        'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
        'throw-restarts', 'with-datastack', 'with-return'), suffix=r'(\s+)')

    tokens = {
        'root': [
            # factor allows a file to start with a shebang
            (r'#!.*$', Comment.Preproc),
            # everything else is handled in the 'base' state
            default('base'),
        ],
        'base': [
            (r'\s+', Whitespace),

            # defining words
            (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Name.Function)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Class)),
            (r'(GENERIC:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            # stack-effect declaration, e.g. "( x y -- z )"
            (r'(\()(\s)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(;)(\s)', bygroups(Keyword, Whitespace)),

            # imports and namespaces
            (r'(USING:)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'vocabs'),
            (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace, Name.Namespace)),
            (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace), 'words'),
            (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+)(=>)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Namespace, Whitespace, Punctuation, Whitespace,
                      Name.Function)),
            (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function)),

            # tuples and classes
            (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Punctuation,
                      Whitespace, Name.Class), 'slots'),
            (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class), 'slots'),
            (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'(PREDICATE:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Punctuation, Whitespace, Name.Class)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Name.Class)),
            (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Name.Class)),
            (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            (r'SINGLETONS:', Keyword, 'classes'),

            # other syntax
            (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SYMBOLS:)(\s+)', bygroups(Keyword, Whitespace), 'words'),
            (r'(SYNTAX:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(ALIEN:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            # FFI function declarations
            (r'(FUNCTION:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),
            (r'(FUNCTION-ALIAS:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),

            # vocab.private
            (r'(<PRIVATE|PRIVATE>)(\s)', bygroups(Keyword.Namespace, Whitespace)),

            # strings
            (r'"""\s(?:.|\n)*?\s"""', String),
            (r'"(?:\\\\|\\"|[^"])*"', String),
            (r'(\S+")(\s+)((?:\\\\|\\"|[^"])*")',
             bygroups(String, Whitespace, String)),
            (r'(CHAR:)(\s+)(\\[\\abfnrstv]|[^\\]\S*)(\s)',
             bygroups(String.Char, Whitespace, String.Char, Whitespace)),

            # comments
            (r'!\s+.*$', Comment),
            (r'#!\s+.*$', Comment),
            (r'/\*\s+(?:.|\n)*?\s\*/', Comment),

            # boolean constants
            (r'[tf]\b', Name.Constant),

            # symbols and literals
            (r'[\\$]\s+\S+', Name.Constant),
            (r'M\\\s+\S+\s+\S+', Name.Constant),

            # numbers (Factor numbers may contain comma separators and
            # are always terminated by whitespace)
            (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
            (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
            (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'0b[01]+\s', Number.Bin),
            (r'0o[0-7]+\s', Number.Oct),
            (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
            (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),

            # keywords
            (r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
             Keyword),

            # builtins (the word lists defined above)
            (builtin_kernel, bygroups(Name.Builtin, Whitespace)),
            (builtin_assocs, bygroups(Name.Builtin, Whitespace)),
            (builtin_combinators, bygroups(Name.Builtin, Whitespace)),
            (builtin_math, bygroups(Name.Builtin, Whitespace)),
            (builtin_sequences, bygroups(Name.Builtin, Whitespace)),
            (builtin_namespaces, bygroups(Name.Builtin, Whitespace)),
            (builtin_arrays, bygroups(Name.Builtin, Whitespace)),
            (builtin_io, bygroups(Name.Builtin, Whitespace)),
            (builtin_strings, bygroups(Name.Builtin, Whitespace)),
            (builtin_vectors, bygroups(Name.Builtin, Whitespace)),
            (builtin_continuations, bygroups(Name.Builtin, Whitespace)),

            # everything else is text
            (r'\S+', Text),
        ],
        'stackeffect': [
            (r'\s+', Whitespace),
            (r'(\()(\s+)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(\))(\s+)', bygroups(Name.Function, Whitespace), '#pop'),
            (r'(--)(\s+)', bygroups(Name.Function, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        'slots': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'(\{)(\s+)(\S+)(\s+)([^}]+)(\s+)(\})(\s+)',
             bygroups(Text, Whitespace, Name.Variable, Whitespace,
                      Text, Whitespace, Text, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        'vocabs': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Namespace),
        ],
        'classes': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Class),
        ],
        'words': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Function),
        ],
    }
diff --git a/pygments/lexers/fantom.py b/pygments/lexers/fantom.py
new file mode 100644
index 0000000..1825b92
--- /dev/null
+++ b/pygments/lexers/fantom.py
@@ -0,0 +1,251 @@
+"""
+ pygments.lexers.fantom
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Fantom language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from string import Template
+
+from pygments.lexer import RegexLexer, include, bygroups, using, \
+ this, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal, Whitespace
+
+__all__ = ['FantomLexer']
+
+
class FantomLexer(RegexLexer):
    """
    For Fantom source code.

    .. versionadded:: 1.5
    """
    name = 'Fantom'
    aliases = ['fan']
    filenames = ['*.fan']
    mimetypes = ['application/x-fantom']

    # often used regexes
    def s(tpl):
        # Substitute the shared sub-regexes ($pod, $eos, $id, $type) into
        # a template pattern.  Only called at class-definition time while
        # building the token table.  (Parameter renamed from ``str`` to
        # avoid shadowing the builtin.)
        return Template(tpl).substitute(
            dict(
                pod=r'[\"\w\.]+',
                eos=r'\n|;',
                id=r'[a-zA-Z_]\w*',
                # all chars which can be part of type definition. Starts with
                # either letter, or [ (maps), or | (funcs)
                type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
            )
        )

    tokens = {
        'comments': [
            (r'(?s)/\*.*?\*/', Comment.Multiline),           # Multiline
            (r'//.*?$', Comment.Single),                     # Single line
            # TODO: highlight references in fandocs
            (r'\*\*.*?$', Comment.Special),                  # Fandoc
            (r'#.*$', Comment.Single)                        # Shell-style
        ],
        'literals': [
            (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number),   # Duration
            (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),  # Duration with dot
            (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float),    # Float/Decimal
            (r'\b-?0x[0-9a-fA-F_]+', Number.Hex),            # Hex
            (r'\b-?[\d_]+', Number.Integer),                 # Int
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),  # Char
            (r'"', Punctuation, 'insideStr'),                # Opening quote
            (r'`', Punctuation, 'insideUri'),                # Opening accent
            (r'\b(true|false|null)\b', Keyword.Constant),    # Bool & null
            (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)',          # DSL
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, String, Punctuation)),
            (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?',               # Type/slot literal
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, Name.Function)),
            (r'\[,\]', Literal),                             # Empty list
            (s(r'($type)(\[,\])'),                           # Typed empty list
             bygroups(using(this, state='inType'), Literal)),
            (r'\[:\]', Literal),                             # Empty Map
            (s(r'($type)(\[:\])'),                           # Typed empty map
             bygroups(using(this, state='inType'), Literal)),
        ],
        'insideStr': [
            (r'\\\\', String.Escape),                        # Escaped backslash
            (r'\\"', String.Escape),                         # Escaped "
            (r'\\`', String.Escape),                         # Escaped `
            (r'\$\w+', String.Interpol),                     # Subst var
            (r'\$\{.*?\}', String.Interpol),                 # Subst expr
            (r'"', Punctuation, '#pop'),                     # Closing quot
            (r'.', String)                                   # String content
        ],
        'insideUri': [  # TODO: remove copy/paste str/uri
            (r'\\\\', String.Escape),                        # Escaped backslash
            (r'\\"', String.Escape),                         # Escaped "
            (r'\\`', String.Escape),                         # Escaped `
            (r'\$\w+', String.Interpol),                     # Subst var
            (r'\$\{.*?\}', String.Interpol),                 # Subst expr
            (r'`', Punctuation, '#pop'),                     # Closing tick
            (r'.', String.Backtick)                          # URI content
        ],
        'protectionKeywords': [
            (r'\b(public|protected|private|internal)\b', Keyword),
        ],
        'typeKeywords': [
            (r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
        ],
        'methodKeywords': [
            (r'\b(abstract|native|once|override|static|virtual|final)\b',
             Keyword),
        ],
        'fieldKeywords': [
            (r'\b(abstract|const|final|native|override|static|virtual|'
             r'readonly)\b', Keyword)
        ],
        'otherKeywords': [
            (words((
                'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while',
                'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue',
                'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'\b(it|this|super)\b', Name.Builtin.Pseudo),
        ],
        'operators': [
            (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
        ],
        'inType': [
            (r'[\[\]|\->:?]', Punctuation),
            (s(r'$id'), Name.Class),
            default('#pop'),
        ],
        'root': [
            include('comments'),
            include('protectionKeywords'),
            include('typeKeywords'),
            include('methodKeywords'),
            include('fieldKeywords'),
            include('literals'),
            include('otherKeywords'),
            include('operators'),
            (r'using\b', Keyword.Namespace, 'using'),        # Using stmt
            (r'@\w+', Name.Decorator, 'facet'),              # Symbol
            (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Class),
             'inheritance'),                                 # Inheritance list

            # Type var := val
            (s(r'($type)([ \t]+)($id)(\s*)(:=)'),
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Variable, Whitespace, Operator)),

            # var := val
            (s(r'($id)(\s*)(:=)'),
             bygroups(Name.Variable, Whitespace, Operator)),

            # .someId( or ->someId( ###
            (s(r'(\.|(?:\->))($id)(\s*)(\()'),
             bygroups(Operator, Name.Function, Whitespace, Punctuation),
             'insideParen'),

            # .someId or ->someId
            (s(r'(\.|(?:\->))($id)'),
             bygroups(Operator, Name.Function)),

            # new makeXXX (
            (r'(new)(\s+)(make\w*)(\s*)(\()',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Punctuation),
             'insideMethodDeclArgs'),

            # Type name (
            (s(r'($type)([ \t]+)'                            # Return type and whitespace
               r'($id)(\s*)(\()'),                           # method name + open brace
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Function, Whitespace, Punctuation),
             'insideMethodDeclArgs'),

            # ArgType argName,
            (s(r'($type)(\s+)($id)(\s*)(,)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation)),

            # ArgType argName)
            # Covered in 'insideParen' state

            # ArgType argName -> ArgType|
            (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation, Whitespace, using(this, state='inType'),
                      Punctuation)),

            # ArgType argName|
            (s(r'($type)(\s+)($id)(\s*)(\|)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation)),

            # Type var
            (s(r'($type)([ \t]+)($id)'),
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Variable)),

            (r'\(', Punctuation, 'insideParen'),
            (r'\{', Punctuation, 'insideBrace'),
            (r'\s+', Whitespace),
            (r'.', Text)
        ],
        'insideParen': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'insideMethodDeclArgs': [
            (r'\)', Punctuation, '#pop'),
            (s(r'($type)(\s+)($id)(\s*)(\))'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation), '#pop'),
            include('root'),
        ],
        'insideBrace': [
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        'inheritance': [
            (r'\s+', Whitespace),                            # Whitespace
            (r':|,', Punctuation),
            (r'(?:(\w+)(::))?(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Class)),
            (r'\{', Punctuation, '#pop')
        ],
        'using': [
            (r'[ \t]+', Whitespace),                         # consume whitespaces
            (r'(\[)(\w+)(\])',
             bygroups(Punctuation, Comment.Special, Punctuation)),  # ffi
            (r'(\")?([\w.]+)(\")?',
             bygroups(Punctuation, Name.Namespace, Punctuation)),   # podname
            (r'::', Punctuation, 'usingClass'),
            default('#pop')
        ],
        'usingClass': [
            (r'[ \t]+', Whitespace),                         # consume whitespaces
            (r'(as)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class), '#pop:2'),
            (r'[\w$]+', Name.Class),
            default('#pop:2')                                # jump out to root state
        ],
        'facet': [
            (r'\s+', Whitespace),
            (r'\{', Punctuation, 'facetFields'),
            default('#pop')
        ],
        'facetFields': [
            include('comments'),
            include('literals'),
            include('operators'),
            (r'\s+', Whitespace),
            (r'(\s*)(\w+)(\s*)(=)', bygroups(Whitespace, Name, Whitespace, Operator)),
            (r'\}', Punctuation, '#pop'),
            # A duplicate (r'\s+', Whitespace) rule that followed here was
            # removed: the identical rule above already consumes all
            # whitespace, so the duplicate was unreachable.
            (r'.', Text)
        ],
    }
diff --git a/pygments/lexers/felix.py b/pygments/lexers/felix.py
new file mode 100644
index 0000000..75df838
--- /dev/null
+++ b/pygments/lexers/felix.py
@@ -0,0 +1,276 @@
+"""
+ pygments.lexers.felix
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Felix language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, default, words, \
+ combined
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['FelixLexer']
+
+
+class FelixLexer(RegexLexer):
+ """
+ For Felix source code.
+
+ .. versionadded:: 1.2
+ """
+
+ name = 'Felix'
+ url = 'http://www.felix-lang.org'
+ aliases = ['felix', 'flx']
+ filenames = ['*.flx', '*.flxh']
+ mimetypes = ['text/x-felix']
+
+ preproc = (
+ 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
+ )
+
+ keywords = (
+ '_', '_deref', 'all', 'as',
+ 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
+ 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
+ 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
+ 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
+ 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
+ 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
+ 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
+ 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
+ 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
+ 'when', 'whilst', 'with', 'yield',
+ )
+
+ keyword_directives = (
+ '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
+ 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
+ 'package', 'private', 'pod', 'property', 'public', 'publish',
+ 'requires', 'todo', 'virtual', 'use',
+ )
+
+ keyword_declarations = (
+ 'def', 'let', 'ref', 'val', 'var',
+ )
+
+ keyword_types = (
+ 'unit', 'void', 'any', 'bool',
+ 'byte', 'offset',
+ 'address', 'caddress', 'cvaddress', 'vaddress',
+ 'tiny', 'short', 'int', 'long', 'vlong',
+ 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
+ 'int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float', 'double', 'ldouble',
+ 'complex', 'dcomplex', 'lcomplex',
+ 'imaginary', 'dimaginary', 'limaginary',
+ 'char', 'wchar', 'uchar',
+ 'charp', 'charcp', 'ucharp', 'ucharcp',
+ 'string', 'wstring', 'ustring',
+ 'cont',
+ 'array', 'varray', 'list',
+ 'lvalue', 'opt', 'slice',
+ )
+
+ keyword_constants = (
+ 'false', 'true',
+ )
+
+ operator_words = (
+ 'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
+ )
+
+ name_builtins = (
+ '_svc', 'while',
+ )
+
+ name_pseudo = (
+ 'root', 'self', 'this',
+ )
+
+ decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+
+ # Keywords
+ (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
+ 'union'), suffix=r'\b'),
+ Keyword, 'funcname'),
+ (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
+ Keyword, 'classname'),
+ (r'(instance|module|typeclass)\b', Keyword, 'modulename'),
+
+ (words(keywords, suffix=r'\b'), Keyword),
+ (words(keyword_directives, suffix=r'\b'), Name.Decorator),
+ (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
+ (words(keyword_types, suffix=r'\b'), Keyword.Type),
+ (words(keyword_constants, suffix=r'\b'), Keyword.Constant),
+
+ # Operators
+ include('operators'),
+
+ # Float Literal
+ # -- Hex Float
+ (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
+ r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
+ # -- DecimalFloat
+ (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
+ r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
+ (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
+ Number.Float),
+
+ # IntegerLiteral
+ # -- Binary
+ (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
+ # -- Octal
+ (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
+ # -- Hexadecimal
+ (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
+ # -- Decimal
+ (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
+
+ # Strings
+ ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
+ ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
+ ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
+ ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
+ ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
+ ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
+ ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
+ ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
+
+ # Punctuation
+ (r'[\[\]{}:(),;?]', Punctuation),
+
+ # Labels
+ (r'[a-zA-Z_]\w*:>', Name.Label),
+
+ # Identifiers
+ (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
+ (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'whitespace': [
+ (r'\s+', Whitespace),
+
+ include('comment'),
+
+ # Preprocessor
+ (r'(#)(\s*)(if)(\s+)(0)',
+ bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
+ Whitespace, Comment.Preproc), 'if0'),
+ (r'#', Comment.Preproc, 'macro'),
+ ],
+ 'operators': [
+ (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
+ (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
+ ],
+ 'comment': [
+ (r'//(.*?)$', Comment.Single),
+ (r'/[*]', Comment.Multiline, 'comment2'),
+ ],
+ 'comment2': [
+ (r'[^/*]', Comment.Multiline),
+ (r'/[*]', Comment.Multiline, '#push'),
+ (r'[*]/', Comment.Multiline, '#pop'),
+ (r'[/*]', Comment.Multiline),
+ ],
+ 'if0': [
+ (r'^(\s*)(#if.*?(?<!\\))(\n)',
+ bygroups(Whitespace, Comment, Whitespace), '#push'),
+ (r'^(\s*)(#endif.*?(?<!\\))(\n)',
+ bygroups(Whitespace, Comment, Whitespace), '#pop'),
+ (r'(.*?)(\n)', bygroups(Comment, Whitespace)),
+ ],
+ 'macro': [
+ include('comment'),
+ (r'(import|include)(\s+)(<[^>]*?>)',
+ bygroups(Comment.Preproc, Whitespace, String), '#pop'),
+ (r'(import|include)(\s+)("[^"]*?")',
+ bygroups(Comment.Preproc, Whitespace, String), '#pop'),
+ (r"(import|include)(\s+)('[^']*?')",
+ bygroups(Comment.Preproc, Whitespace, String), '#pop'),
+ (r'[^/\n]+', Comment.Preproc),
+ # (r'/[*](.|\n)*?[*]/', Comment),
+ # (r'//.*?\n', Comment, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Whitespace, '#pop'),
+ ],
+ 'funcname': [
+ include('whitespace'),
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
+ # anonymous functions
+ (r'(?=\()', Text, '#pop'),
+ ],
+ 'classname': [
+ include('whitespace'),
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
+ # anonymous classes
+ (r'(?=\{)', Text, '#pop'),
+ ],
+ 'modulename': [
+ include('whitespace'),
+ (r'\[', Punctuation, ('modulename2', 'tvarlist')),
+ default('modulename2'),
+ ],
+ 'modulename2': [
+ include('whitespace'),
+ (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
+ ],
+ 'tvarlist': [
+ include('whitespace'),
+ include('operators'),
+ (r'\[', Punctuation, '#push'),
+ (r'\]', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'(with|where)\b', Keyword),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings': [
+ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ (r'[^\\\'"%\n]+', String),
+ # quotes, percents and backslashes must be parsed one at a time
+ (r'[\'"\\]', String),
+ # unhandled string formatting sign
+ (r'%', String)
+ # newlines are an error (use "nl" state)
+ ],
+ 'nl': [
+ (r'\n', String)
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ # included here again for raw strings
+ (r'\\\\|\\"|\\\n', String.Escape),
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ # included here again for raw strings
+ (r"\\\\|\\'|\\\n", String.Escape),
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ }
diff --git a/pygments/lexers/fift.py b/pygments/lexers/fift.py
new file mode 100644
index 0000000..2cd8c38
--- /dev/null
+++ b/pygments/lexers/fift.py
@@ -0,0 +1,67 @@
+"""
+ pygments.lexers.fift
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Fift.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Literal, Comment, Name, String, Number, Whitespace
+
+__all__ = ['FiftLexer']
+
+
+class FiftLexer(RegexLexer):
+ """
+ For Fift source code.
+ """
+
+ name = 'Fift'
+ aliases = ['fift', 'fif']
+ filenames = ['*.fif']
+ url = 'https://ton-blockchain.github.io/docs/fiftbase.pdf'
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+
+ include('comments'),
+
+ (r'[\.+]?\"', String, 'string'),
+
+ # numbers
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'0b[01]+', Number.Bin),
+ (r'-?[0-9]+("/"-?[0-9]+)?', Number.Decimal),
+
+ # slices
+ (r'b\{[01]+\}', Literal),
+ (r'x\{[0-9a-fA-F_]+\}', Literal),
+
+ # byte literal
+ (r'B\{[0-9a-fA-F_]+\}', Literal),
+
+ # treat anything else as a word
+ (r'\S+', Name)
+ ],
+
+ 'string': [
+ (r'\\.', String.Escape),
+ (r'\"', String, '#pop'),
+ (r'[^\"\r\n\\]+', String)
+ ],
+
+ 'comments': [
+ (r'//.*', Comment.Singleline),
+ (r'/\*', Comment.Multiline, 'comment'),
+ ],
+ 'comment': [
+ (r'[^/*]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ }
diff --git a/pygments/lexers/floscript.py b/pygments/lexers/floscript.py
new file mode 100644
index 0000000..61f6879
--- /dev/null
+++ b/pygments/lexers/floscript.py
@@ -0,0 +1,82 @@
+"""
+ pygments.lexers.floscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for FloScript.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['FloScriptLexer']
+
+
+class FloScriptLexer(RegexLexer):
+ """
+ For FloScript configuration language source code.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'FloScript'
+ url = 'https://github.com/ioflo/ioflo'
+ aliases = ['floscript', 'flo']
+ filenames = ['*.flo']
+
+ def innerstring_rules(ttype):
+ return [
+ # the old style '%s' % (...) string formatting
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ # backslashes, quotes and formatting signs must be parsed one at a time
+ (r'[^\\\'"%\n]+', ttype),
+ (r'[\'"\\]', ttype),
+ # unhandled string formatting sign
+ (r'%', ttype),
+ # newlines are an error (use "nl" state)
+ ]
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+
+ (r'[]{}:(),;[]', Punctuation),
+ (r'(\\)(\n)', bygroups(Text, Whitespace)),
+ (r'\\', Text),
+ (r'(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|'
+ r'and|not)\b', Operator.Word),
+ (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
+ (r'(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|'
+ r'repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|'
+ r'copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|'
+ r'give|take)\b', Name.Builtin),
+ (r'(frame|framer|house)\b', Keyword),
+ ('"', String, 'string'),
+
+ include('name'),
+ include('numbers'),
+ (r'#.+$', Comment.Single),
+ ],
+ 'string': [
+ ('[^"]+', String),
+ ('"', String, '#pop'),
+ ],
+ 'numbers': [
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
+ (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
+ (r'0[0-7]+j?', Number.Oct),
+ (r'0[bB][01]+', Number.Bin),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (r'\d+L', Number.Integer.Long),
+ (r'\d+j?', Number.Integer)
+ ],
+
+ 'name': [
+ (r'@[\w.]+', Name.Decorator),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ }
diff --git a/pygments/lexers/forth.py b/pygments/lexers/forth.py
new file mode 100644
index 0000000..2207908
--- /dev/null
+++ b/pygments/lexers/forth.py
@@ -0,0 +1,179 @@
+"""
+ pygments.lexers.forth
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Forth language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Keyword, Name, String, Number, \
+ Whitespace
+
+
+__all__ = ['ForthLexer']
+
+
+class ForthLexer(RegexLexer):
+ """
+ Lexer for Forth files.
+
+ .. versionadded:: 2.2
+ """
+ name = 'Forth'
+ url = 'https://www.forth.com/forth/'
+ aliases = ['forth']
+ filenames = ['*.frt', '*.fs']
+ mimetypes = ['application/x-forth']
+
+ flags = re.IGNORECASE | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ # All comment types
+ (r'\\.*?$', Comment.Single),
+ (r'\([\s].*?\)', Comment.Single),
+ # defining words. The next word is a new command name
+ (r'(:|variable|constant|value|buffer:)(\s+)',
+ bygroups(Keyword.Namespace, Whitespace), 'worddef'),
+ # strings are rather simple
+ (r'([.sc]")(\s+?)', bygroups(String, Whitespace), 'stringdef'),
+ # keywords from the various wordsets
+ # *** Wordset BLOCK
+ (r'(blk|block|buffer|evaluate|flush|load|save-buffers|update|'
+ # *** Wordset BLOCK-EXT
+ r'empty-buffers|list|refill|scr|thru|'
+ # *** Wordset CORE
+ r'\#s|\*\/mod|\+loop|\/mod|0<|0=|1\+|1-|2!|'
+ r'2\*|2\/|2@|2drop|2dup|2over|2swap|>body|'
+ r'>in|>number|>r|\?dup|abort|abort\"|abs|'
+ r'accept|align|aligned|allot|and|base|begin|'
+ r'bl|c!|c,|c@|cell\+|cells|char|char\+|'
+ r'chars|constant|count|cr|create|decimal|'
+ r'depth|do|does>|drop|dup|else|emit|environment\?|'
+ r'evaluate|execute|exit|fill|find|fm\/mod|'
+ r'here|hold|i|if|immediate|invert|j|key|'
+ r'leave|literal|loop|lshift|m\*|max|min|'
+ r'mod|move|negate|or|over|postpone|quit|'
+ r'r>|r@|recurse|repeat|rot|rshift|s\"|s>d|'
+ r'sign|sm\/rem|source|space|spaces|state|swap|'
+ r'then|type|u\.|u\<|um\*|um\/mod|unloop|until|'
+ r'variable|while|word|xor|\[char\]|\[\'\]|'
+ r'@|!|\#|<\#|\#>|:|;|\+|-|\*|\/|,|<|>|\|1\+|1-|\.|'
+ # *** Wordset CORE-EXT
+ r'\.r|0<>|'
+ r'0>|2>r|2r>|2r@|:noname|\?do|again|c\"|'
+ r'case|compile,|endcase|endof|erase|false|'
+ r'hex|marker|nip|of|pad|parse|pick|refill|'
+ r'restore-input|roll|save-input|source-id|to|'
+ r'true|tuck|u\.r|u>|unused|value|within|'
+ r'\[compile\]|'
+ # *** Wordset CORE-EXT-obsolescent
+ r'\#tib|convert|expect|query|span|'
+ r'tib|'
+ # *** Wordset DOUBLE
+ r'2constant|2literal|2variable|d\+|d-|'
+ r'd\.|d\.r|d0<|d0=|d2\*|d2\/|d<|d=|d>s|'
+ r'dabs|dmax|dmin|dnegate|m\*\/|m\+|'
+ # *** Wordset DOUBLE-EXT
+ r'2rot|du<|'
+ # *** Wordset EXCEPTION
+ r'catch|throw|'
+ # *** Wordset EXCEPTION-EXT
+ r'abort|abort\"|'
+ # *** Wordset FACILITY
+ r'at-xy|key\?|page|'
+ # *** Wordset FACILITY-EXT
+ r'ekey|ekey>char|ekey\?|emit\?|ms|time&date|'
+ # *** Wordset FILE
+ r'BIN|CLOSE-FILE|CREATE-FILE|DELETE-FILE|FILE-POSITION|'
+ r'FILE-SIZE|INCLUDE-FILE|INCLUDED|OPEN-FILE|R\/O|'
+ r'R\/W|READ-FILE|READ-LINE|REPOSITION-FILE|RESIZE-FILE|'
+ r'S\"|SOURCE-ID|W/O|WRITE-FILE|WRITE-LINE|'
+ # *** Wordset FILE-EXT
+ r'FILE-STATUS|FLUSH-FILE|REFILL|RENAME-FILE|'
+ # *** Wordset FLOAT
+ r'>float|d>f|'
+ r'f!|f\*|f\+|f-|f\/|f0<|f0=|f<|f>d|f@|'
+ r'falign|faligned|fconstant|fdepth|fdrop|fdup|'
+ r'fliteral|float\+|floats|floor|fmax|fmin|'
+ r'fnegate|fover|frot|fround|fswap|fvariable|'
+ r'represent|'
+ # *** Wordset FLOAT-EXT
+ r'df!|df@|dfalign|dfaligned|dfloat\+|'
+ r'dfloats|f\*\*|f\.|fabs|facos|facosh|falog|'
+ r'fasin|fasinh|fatan|fatan2|fatanh|fcos|fcosh|'
+ r'fe\.|fexp|fexpm1|fln|flnp1|flog|fs\.|fsin|'
+ r'fsincos|fsinh|fsqrt|ftan|ftanh|f~|precision|'
+ r'set-precision|sf!|sf@|sfalign|sfaligned|sfloat\+|'
+ r'sfloats|'
+ # *** Wordset LOCAL
+ r'\(local\)|to|'
+ # *** Wordset LOCAL-EXT
+ r'locals\||'
+ # *** Wordset MEMORY
+ r'allocate|free|resize|'
+ # *** Wordset SEARCH
+ r'definitions|find|forth-wordlist|get-current|'
+ r'get-order|search-wordlist|set-current|set-order|'
+ r'wordlist|'
+ # *** Wordset SEARCH-EXT
+ r'also|forth|only|order|previous|'
+ # *** Wordset STRING
+ r'-trailing|\/string|blank|cmove|cmove>|compare|'
+ r'search|sliteral|'
+ # *** Wordset TOOLS
+ r'.s|dump|see|words|'
+ # *** Wordset TOOLS-EXT
+ r';code|'
+ r'ahead|assembler|bye|code|cs-pick|cs-roll|'
+ r'editor|state|\[else\]|\[if\]|\[then\]|'
+ # *** Wordset TOOLS-EXT-obsolescent
+ r'forget|'
+ # Forth 2012
+ r'defer|defer@|defer!|action-of|begin-structure|field:|buffer:|'
+ r'parse-name|buffer:|traverse-wordlist|n>r|nr>|2value|fvalue|'
+ r'name>interpret|name>compile|name>string|'
+ r'cfield:|end-structure)(?!\S)', Keyword),
+
+ # Numbers
+ (r'(\$[0-9A-F]+)', Number.Hex),
+ (r'(\#|%|&|\-|\+)?[0-9]+', Number.Integer),
+ (r'(\#|%|&|\-|\+)?[0-9.]+', Keyword.Type),
+ # amforth specific
+ (r'(@i|!i|@e|!e|pause|noop|turnkey|sleep|'
+ r'itype|icompare|sp@|sp!|rp@|rp!|up@|up!|'
+ r'>a|a>|a@|a!|a@+|a@-|>b|b>|b@|b!|b@+|b@-|'
+ r'find-name|1ms|'
+ r'sp0|rp0|\(evaluate\)|int-trap|int!)(?!\S)',
+ Name.Constant),
+ # a proposal
+ (r'(do-recognizer|r:fail|recognizer:|get-recognizers|'
+ r'set-recognizers|r:float|r>comp|r>int|r>post|'
+ r'r:name|r:word|r:dnum|r:num|recognizer|forth-recognizer|'
+ r'rec:num|rec:float|rec:word)(?!\S)', Name.Decorator),
+ # defining words. The next word is a new command name
+ (r'(Evalue|Rvalue|Uvalue|Edefer|Rdefer|Udefer)(\s+)',
+ bygroups(Keyword.Namespace, Text), 'worddef'),
+
+ (r'\S+', Name.Function), # Anything else is executed
+
+ ],
+ 'worddef': [
+ (r'\S+', Name.Class, '#pop'),
+ ],
+ 'stringdef': [
+ (r'[^"]+', String, '#pop'),
+ ],
+ }
+
+ def analyse_text(text):
+ """Forth uses : COMMAND ; quite a lot in a single line, so we're trying
+ to find that."""
+ if re.search('\n:[^\n]+;\n', text):
+ return 0.3
diff --git a/pygments/lexers/fortran.py b/pygments/lexers/fortran.py
new file mode 100644
index 0000000..d45581a
--- /dev/null
+++ b/pygments/lexers/fortran.py
@@ -0,0 +1,213 @@
+"""
+ pygments.lexers.fortran
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Fortran languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, include, words, using, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic
+
+__all__ = ['FortranLexer', 'FortranFixedLexer']
+
+
+class FortranLexer(RegexLexer):
+ """
+ Lexer for FORTRAN 90 code.
+
+ .. versionadded:: 0.10
+ """
+ name = 'Fortran'
+ url = 'https://fortran-lang.org/'
+ aliases = ['fortran', 'f90']
+ filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
+ mimetypes = ['text/x-fortran']
+ flags = re.IGNORECASE | re.MULTILINE
+
+ # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
+ # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
+ # Logical (?): NOT, AND, OR, EQV, NEQV
+
+ # Builtins:
+ # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
+
+ tokens = {
+ 'root': [
+ (r'^#.*\n', Comment.Preproc),
+ (r'!.*\n', Comment),
+ include('strings'),
+ include('core'),
+ (r'[a-z][\w$]*', Name),
+ include('nums'),
+ (r'[\s]+', Text.Whitespace),
+ ],
+ 'core': [
+ # Statements
+
+ (r'\b(DO)(\s+)(CONCURRENT)\b', bygroups(Keyword, Text.Whitespace, Keyword)),
+ (r'\b(GO)(\s*)(TO)\b', bygroups(Keyword, Text.Whitespace, Keyword)),
+
+ (words((
+ 'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
+ 'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
+ 'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
+ 'CODIMENSION', 'COMMON', 'CONTIGUOUS', 'CONTAINS',
+ 'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
+ 'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
+ 'ENDASSOCIATE', 'ENDBLOCK', 'ENDDO', 'ENDENUM', 'ENDFORALL',
+ 'ENDFUNCTION', 'ENDIF', 'ENDINTERFACE', 'ENDMODULE', 'ENDPROGRAM',
+ 'ENDSELECT', 'ENDSUBMODULE', 'ENDSUBROUTINE', 'ENDTYPE', 'ENDWHERE',
+ 'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'ERROR STOP', 'EXIT',
+ 'EXTENDS', 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
+ 'FUNCTION', 'GENERIC', 'IF', 'IMAGES', 'IMPLICIT',
+ 'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
+ 'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
+ 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN',
+ 'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT',
+ 'PRIVATE', 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
+ 'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
+ 'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
+ 'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
+ 'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
+ Keyword),
+
+ # Data Types
+ (words((
+ 'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
+ 'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
+ 'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
+ 'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
+ 'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
+ 'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
+ 'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
+ 'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
+ prefix=r'\b', suffix=r'\s*\b'),
+ Keyword.Type),
+
+ # Operators
+ (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
+
+ (r'(::)', Keyword.Declaration),
+
+ (r'[()\[\],:&%;.]', Punctuation),
+ # Intrinsics
+ (words((
+ 'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
+ 'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
+ 'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
+ 'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
+ 'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
+ 'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
+ 'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
+ 'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
+ 'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
+ 'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
+ 'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
+ 'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
+ 'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
+ 'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
+ 'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
+ 'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
+ 'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
+ 'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
+ 'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
+ 'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
+ 'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
+ 'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
+ 'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
+ 'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
+ 'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
+ 'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
+ 'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
+ 'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
+ 'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
+ 'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
+ 'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
+ 'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
+ 'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
+ 'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
+ 'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
+ 'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
+ 'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
+ 'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
+ 'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
+ 'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
+ 'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
+ 'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
+ 'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
+ 'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
+ 'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
+ 'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
+ 'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
+ 'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
+ 'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
+ 'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
+ Name.Builtin),
+
+ # Booleans
+ (r'\.(true|false)\.', Name.Builtin),
+ # Comparing Operators
+ (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
+ ],
+
+ 'strings': [
+ (r'"(\\[0-7]+|\\[^0-7]|[^"\\])*"', String.Double),
+ (r"'(\\[0-7]+|\\[^0-7]|[^'\\])*'", String.Single),
+ ],
+
+ 'nums': [
+ (r'\d+(?![.e])(_([1-9]|[a-z]\w*))?', Number.Integer),
+ (r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
+ (r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
+ (r'[+-]?\d+(\.\d*)?[ed][-+]?\d+(_([1-9]|[a-z]\w*))?', Number.Float),
+ ],
+ }
+
+
+class FortranFixedLexer(RegexLexer):
+ """
+ Lexer for fixed format Fortran.
+
+ .. versionadded:: 2.1
+ """
+ name = 'FortranFixed'
+ aliases = ['fortranfixed']
+ filenames = ['*.f', '*.F']
+
+ flags = re.IGNORECASE
+
+ def _lex_fortran(self, match, ctx=None):
+ """Lex a line just as free form fortran without line break."""
+ lexer = FortranLexer()
+ text = match.group(0) + "\n"
+ for index, token, value in lexer.get_tokens_unprocessed(text):
+ value = value.replace('\n', '')
+ if value != '':
+ yield index, token, value
+
+ tokens = {
+ 'root': [
+ (r'[C*].*\n', Comment),
+ (r'#.*\n', Comment.Preproc),
+ (r' {0,4}!.*\n', Comment),
+ (r'(.{5})', Name.Label, 'cont-char'),
+ (r'.*\n', using(FortranLexer)),
+ ],
+ 'cont-char': [
+ (' ', Text, 'code'),
+ ('0', Comment, 'code'),
+ ('.', Generic.Strong, 'code'),
+ ],
+ 'code': [
+ (r'(.{66})(.*)(\n)',
+ bygroups(_lex_fortran, Comment, Text.Whitespace), 'root'),
+ (r'(.*)(\n)', bygroups(_lex_fortran, Text.Whitespace), 'root'),
+ default('root'),
+ ]
+ }
diff --git a/pygments/lexers/foxpro.py b/pygments/lexers/foxpro.py
new file mode 100644
index 0000000..578c22a
--- /dev/null
+++ b/pygments/lexers/foxpro.py
@@ -0,0 +1,427 @@
+"""
+ pygments.lexers.foxpro
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Simple lexer for Microsoft Visual FoxPro source code.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer
+from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
+ Name, String
+
+__all__ = ['FoxProLexer']
+
+
+class FoxProLexer(RegexLexer):
+    """Lexer for Microsoft Visual FoxPro language.
+
+    FoxPro syntax allows to shorten all keywords and function names
+    to 4 characters. Shortened forms are not recognized by this lexer.
+
+    .. versionadded:: 1.6
+    """
+
+    name = 'FoxPro'
+    aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
+    filenames = ['*.PRG', '*.prg']
+    # NOTE(review): the Lexer API attribute is 'mimetypes'; 'mimetype'
+    # here looks like a typo, so no MIME types get registered -- confirm.
+    mimetype = []
+
+    flags = re.IGNORECASE | re.MULTILINE
+
+    tokens = {
+        'root': [
+            (r';\s*\n', Punctuation),  # line continuation: consume newline
+            # Each fresh line is classified in the 'newline' state first.
+            (r'(^|\n)\s*', Text, 'newline'),
+
+            # Square brackets may be used for array indices
+            # and for string literal. Look for arrays
+            # before matching string literals.
+            (r'(?<=\w)\[[0-9, ]+\]', Text),
+            # NOTE(review): the class [^]*] excludes both ']' and '*', so a
+            # bracket string containing '*' is not matched -- confirm intent.
+            (r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
+            # Comments: leading '*' line or inline '&&'.
+            # NOTE(review): the '&amp;&amp;' alternative looks like
+            # HTML-escape residue of '&&' -- confirm against upstream.
+            (r'(^\s*\*|&&|&amp;&amp;).*?\n', Comment.Single),
+
+            # Built-in function names, recognized only when directly
+            # followed by an opening parenthesis.
+            # NOTE(review): the 'COM|Functions|' entry below looks like
+            # residue of a 'COM Functions' documentation heading -- confirm.
+            (r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
+             r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
+             r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
+             r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
+             r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
+             r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
+             r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
+             r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
+             r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
+             r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
+             r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
+             r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
+             r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
+             r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
+             r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
+             r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
+             r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
+             r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
+             r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
+             r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
+             r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
+             r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
+             r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
+             r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
+             r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
+             r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
+             r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
+             r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
+             r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
+             r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
+             r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
+             r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
+             r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
+             r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
+             r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
+             r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
+             r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
+             r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
+             r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
+             r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
+             r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
+             r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
+             r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
+             r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
+             r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
+             r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
+             r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
+             r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
+             r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
+             r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
+             r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
+             r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
+             r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
+             r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
+             r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
+             r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
+             r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
+             r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
+             r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
+             r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
+             r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
+             r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
+             r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
+             r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
+             r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
+             r'YEAR)(?=\s*\()', Name.Function),
+
+            # System (underscore-prefixed) memory variables.
+            (r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
+             r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
+             r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
+             r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
+             r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
+             r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
+             r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
+             r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
+             r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
+             r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
+             r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
+             r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
+             r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
+
+            # Self-references to the current form / form set.
+            (r'THISFORMSET|THISFORM|THIS', Name.Builtin),
+
+            # Base class names.
+            (r'Application|CheckBox|Collection|Column|ComboBox|'
+             r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
+             r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
+             r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
+             r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
+             r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
+             r'Project|Relation|ReportListener|Separator|Servers|Server|'
+             r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
+             r'XMLAdapter|XMLField|XMLTable', Name.Class),
+
+            # Memory variables accessed through the 'm.' prefix.
+            (r'm\.[a-z_]\w*', Name.Variable),
+            # Logical constants (.T./.F./...) and word operators.
+            (r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
+
+            # Object properties, recognized after a dot.
+            # NOTE(review): the 'ItemBackColor,' entry below contains a
+            # stray comma inside the name -- confirm against upstream.
+            (r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
+             r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
+             r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
+             r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
+             r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
+             r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
+             r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
+             r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
+             r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
+             r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
+             r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
+             r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
+             r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
+             r'BreakOnError|BufferModeOverride|BufferMode|'
+             r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
+             r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
+             r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
+             r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
+             r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
+             r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
+             r'ContinuousScroll|ControlBox|ControlCount|Controls|'
+             r'ControlSource|ConversionFunc|Count|CurrentControl|'
+             r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
+             r'CursorSchema|CursorSource|CursorStatus|Curvature|'
+             r'Database|DataSessionID|DataSession|DataSourceType|'
+             r'DataSource|DataType|DateFormat|DateMark|Debug|'
+             r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
+             r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
+             r'DeleteCmd|DeleteMark|Description|Desktop|'
+             r'Details|DisabledBackColor|DisabledForeColor|'
+             r'DisabledItemBackColor|DisabledItemForeColor|'
+             r'DisabledPicture|DisableEncode|DisplayCount|'
+             r'DisplayValue|Dockable|Docked|DockPosition|'
+             r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
+             r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
+             r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
+             r'DynamicFontItalic|DynamicFontStrikethru|'
+             r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
+             r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
+             r'DynamicLineHeight|EditorOptions|Enabled|'
+             r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
+             r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
+             r'FetchMemoDataSource|FetchMemo|FetchSize|'
+             r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
+             r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
+             r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
+             r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
+             r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
+             r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
+             r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
+             r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
+             r'HelpContextID|HideSelection|HighlightBackColor|'
+             r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
+             r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
+             r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
+             r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
+             r'InsertCmdDataSource|InsertCmdRefreshCmd|'
+             r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
+             r'InsertCmd|Instancing|IntegralHeight|'
+             r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
+             r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
+             r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
+             r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
+             r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
+             r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
+             r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
+             r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
+             r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
+             r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
+             r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
+             r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
+             r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
+             r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
+             r'NumberOfElements|Object|OLEClass|OLEDragMode|'
+             r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
+             r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
+             r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
+             r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
+             r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
+             r'OutputPageCount|OutputType|PageCount|PageHeight|'
+             r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
+             r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
+             r'Parent|Partition|PasswordChar|PictureMargin|'
+             r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
+             r'PictureVal|Picture|Prepared|'
+             r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
+             r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
+             r'ProjectHookLibrary|ProjectHook|QuietMode|'
+             r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
+             r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
+             r'RecordSource|RefreshAlias|'
+             r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
+             r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
+             r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
+             r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
+             r'Rotation|RowColChange|RowHeight|RowSourceType|'
+             r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
+             r'Seconds|SelectCmd|SelectedID|'
+             r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
+             r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
+             r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
+             r'ServerClass|ServerHelpFile|ServerName|'
+             r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
+             r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
+             r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
+             r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
+             r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
+             r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
+             r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
+             r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
+             r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
+             r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
+             r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
+             r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
+             r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
+             r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
+             r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
+             r'VersionCompany|VersionCopyright|VersionDescription|'
+             r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
+             r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
+             r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
+             r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
+             r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
+             r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
+             r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
+             r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
+             r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
+             r'XSDtype|ZoomBox)', Name.Attribute),
+
+            # Object methods, recognized after a dot.
+            (r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
+             r'AddProperty|AddTableSchema|AddToSCC|Add|'
+             r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
+             r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
+             r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
+             r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
+             r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
+             r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
+             r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
+             r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
+             r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
+             r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
+             r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
+             r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
+             r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
+             r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
+             r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
+             r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
+             r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
+             r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
+             r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
+
+            # Events and database-container (dbc_) procedures, after a dot.
+            (r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
+             r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
+             r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
+             r'AfterCursorUpdate|AfterDelete|AfterInsert|'
+             r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
+             r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
+             r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
+             r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
+             r'BeforeInsert|BeforeDock|BeforeOpenTables|'
+             r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
+             r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
+             r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
+             r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
+             r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
+             r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
+             r'dbc_AfterDropOffline|dbc_AfterDropTable|'
+             r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
+             r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
+             r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
+             r'dbc_AfterRenameTable|dbc_AfterRenameView|'
+             r'dbc_AfterValidateData|dbc_BeforeAddTable|'
+             r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
+             r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
+             r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
+             r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
+             r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
+             r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
+             r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
+             r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
+             r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
+             r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
+             r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
+             r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
+             r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
+             r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
+             r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
+             r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
+             r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
+             r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
+             r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
+             r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
+             r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
+             r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
+             r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
+             r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
+
+            (r'\s+', Text),
+            # everything else is not colored
+            (r'.', Text),
+        ],
+        # Classify the first word on a line: comment, command keyword,
+        # preprocessor directive, or variable; then pop back to 'root'.
+        'newline': [
+            (r'\*.*?$', Comment.Single, '#pop'),
+            (r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
+             r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
+             r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
+             r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
+             r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
+             r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
+             r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
+             r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
+             r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
+             r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
+             r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
+             r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
+             r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
+             r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
+             r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
+             r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
+             r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
+             r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
+             r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
+             r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
+             r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
+             r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
+             r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
+             r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
+             r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
+             r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
+             r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
+             r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
+             r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
+             r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
+             r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
+             r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
+             r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
+             r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
+             r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
+             r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
+             r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
+             r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
+             r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
+             r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
+             r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
+             r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
+             r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
+             r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
+             r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
+             r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
+             r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
+             r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
+             r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
+             r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
+             r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
+             r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
+             r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
+             r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
+             r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
+             r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
+             r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
+             r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
+             r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
+             r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
+             r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
+             r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
+             r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
+             r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
+             r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
+             r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
+             r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
+             r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
+             r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
+             r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
+             r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
+             r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
+             r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
+             r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
+             Keyword.Reserved, '#pop'),
+            (r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
+             Comment.Preproc, '#pop'),
+            (r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
+            (r'.', Text, '#pop'),
+        ],
+    }
diff --git a/pygments/lexers/freefem.py b/pygments/lexers/freefem.py
new file mode 100644
index 0000000..6150c15
--- /dev/null
+++ b/pygments/lexers/freefem.py
@@ -0,0 +1,894 @@
+"""
+ pygments.lexers.freefem
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for FreeFem++ language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.token import Comment, Operator, Keyword, Name
+
+from pygments.lexers.c_cpp import CppLexer
+
+__all__ = ['FreeFemLexer']
+
+
+class FreeFemLexer(CppLexer):
+ """
+ For FreeFem++ source.
+
+ This is an extension of the CppLexer, as the FreeFem Language is a superset
+ of C++.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'Freefem'
+ url = 'https://freefem.org/'
+ aliases = ['freefem']
+ filenames = ['*.edp']
+ mimetypes = ['text/x-freefem']
+
+ # Language operators
+ operators = {'+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''}
+
+ # types
+ types = {'bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline',
+ 'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm',
+ 'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap',
+ 'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf'}
+
+ # finite element spaces
+ fespaces = {'BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT',
+ 'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d',
+ 'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge',
+ 'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc',
+ 'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho',
+ 'RT2', 'RT2Ortho'}
+
+ # preprocessor
+ preprocessor = {'ENDIFMACRO', 'include', 'IFMACRO', 'load'}
+
+ # Language keywords
+ keywords = {
+ 'adj',
+ 'append',
+ 'area',
+ 'ARGV',
+ 'be',
+ 'binary',
+ 'BoundaryEdge',
+ 'bordermeasure',
+ 'CG',
+ 'Cholesky',
+ 'cin',
+ 'cout',
+ 'Crout',
+ 'default',
+ 'diag',
+ 'edgeOrientation',
+ 'endl',
+ 'false',
+ 'ffind',
+ 'FILE',
+ 'find',
+ 'fixed',
+ 'flush',
+ 'GMRES',
+ 'good',
+ 'hTriangle',
+ 'im',
+ 'imax',
+ 'imin',
+ 'InternalEdge',
+ 'l1',
+ 'l2',
+ 'label',
+ 'lenEdge',
+ 'length',
+ 'LINE',
+ 'linfty',
+ 'LU',
+ 'm',
+ 'max',
+ 'measure',
+ 'min',
+ 'mpiAnySource',
+ 'mpiBAND',
+ 'mpiBXOR',
+ 'mpiCommWorld',
+ 'mpiLAND',
+ 'mpiLOR',
+ 'mpiLXOR',
+ 'mpiMAX',
+ 'mpiMIN',
+ 'mpiPROD',
+ 'mpirank',
+ 'mpisize',
+ 'mpiSUM',
+ 'mpiUndefined',
+ 'n',
+ 'N',
+ 'nbe',
+ 'ndof',
+ 'ndofK',
+ 'noshowbase',
+ 'noshowpos',
+ 'notaregion',
+ 'nt',
+ 'nTonEdge',
+ 'nuEdge',
+ 'nuTriangle',
+ 'nv',
+ 'P',
+ 'pi',
+ 'precision',
+ 'qf1pE',
+ 'qf1pElump',
+ 'qf1pT',
+ 'qf1pTlump',
+ 'qfV1',
+ 'qfV1lump',
+ 'qf2pE',
+ 'qf2pT',
+ 'qf2pT4P1',
+ 'qfV2',
+ 'qf3pE',
+ 'qf4pE',
+ 'qf5pE',
+ 'qf5pT',
+ 'qfV5',
+ 'qf7pT',
+ 'qf9pT',
+ 'qfnbpE',
+ 'quantile',
+ 're',
+ 'region',
+ 'rfind',
+ 'scientific',
+ 'searchMethod',
+ 'setw',
+ 'showbase',
+ 'showpos',
+ 'sparsesolver',
+ 'sum',
+ 'tellp',
+ 'true',
+ 'UMFPACK',
+ 'unused',
+ 'whoinElement',
+ 'verbosity',
+ 'version',
+ 'volume',
+ 'x',
+ 'y',
+ 'z'
+ }
+
+ # Language shipped functions and class ( )
+ functions = {
+ 'abs',
+ 'acos',
+ 'acosh',
+ 'adaptmesh',
+ 'adj',
+ 'AffineCG',
+ 'AffineGMRES',
+ 'arg',
+ 'asin',
+ 'asinh',
+ 'assert',
+ 'atan',
+ 'atan2',
+ 'atanh',
+ 'atof',
+ 'atoi',
+ 'BFGS',
+ 'broadcast',
+ 'buildlayers',
+ 'buildmesh',
+ 'ceil',
+ 'chi',
+ 'complexEigenValue',
+ 'copysign',
+ 'change',
+ 'checkmovemesh',
+ 'clock',
+ 'cmaes',
+ 'conj',
+ 'convect',
+ 'cos',
+ 'cosh',
+ 'cube',
+ 'd',
+ 'dd',
+ 'dfft',
+ 'diffnp',
+ 'diffpos',
+ 'dimKrylov',
+ 'dist',
+ 'dumptable',
+ 'dx',
+ 'dxx',
+ 'dxy',
+ 'dxz',
+ 'dy',
+ 'dyx',
+ 'dyy',
+ 'dyz',
+ 'dz',
+ 'dzx',
+ 'dzy',
+ 'dzz',
+ 'EigenValue',
+ 'emptymesh',
+ 'erf',
+ 'erfc',
+ 'exec',
+ 'exit',
+ 'exp',
+ 'fdim',
+ 'floor',
+ 'fmax',
+ 'fmin',
+ 'fmod',
+ 'freeyams',
+ 'getARGV',
+ 'getline',
+ 'gmshload',
+ 'gmshload3',
+ 'gslcdfugaussianP',
+ 'gslcdfugaussianQ',
+ 'gslcdfugaussianPinv',
+ 'gslcdfugaussianQinv',
+ 'gslcdfgaussianP',
+ 'gslcdfgaussianQ',
+ 'gslcdfgaussianPinv',
+ 'gslcdfgaussianQinv',
+ 'gslcdfgammaP',
+ 'gslcdfgammaQ',
+ 'gslcdfgammaPinv',
+ 'gslcdfgammaQinv',
+ 'gslcdfcauchyP',
+ 'gslcdfcauchyQ',
+ 'gslcdfcauchyPinv',
+ 'gslcdfcauchyQinv',
+ 'gslcdflaplaceP',
+ 'gslcdflaplaceQ',
+ 'gslcdflaplacePinv',
+ 'gslcdflaplaceQinv',
+ 'gslcdfrayleighP',
+ 'gslcdfrayleighQ',
+ 'gslcdfrayleighPinv',
+ 'gslcdfrayleighQinv',
+ 'gslcdfchisqP',
+ 'gslcdfchisqQ',
+ 'gslcdfchisqPinv',
+ 'gslcdfchisqQinv',
+ 'gslcdfexponentialP',
+ 'gslcdfexponentialQ',
+ 'gslcdfexponentialPinv',
+ 'gslcdfexponentialQinv',
+ 'gslcdfexppowP',
+ 'gslcdfexppowQ',
+ 'gslcdftdistP',
+ 'gslcdftdistQ',
+ 'gslcdftdistPinv',
+ 'gslcdftdistQinv',
+ 'gslcdffdistP',
+ 'gslcdffdistQ',
+ 'gslcdffdistPinv',
+ 'gslcdffdistQinv',
+ 'gslcdfbetaP',
+ 'gslcdfbetaQ',
+ 'gslcdfbetaPinv',
+ 'gslcdfbetaQinv',
+ 'gslcdfflatP',
+ 'gslcdfflatQ',
+ 'gslcdfflatPinv',
+ 'gslcdfflatQinv',
+ 'gslcdflognormalP',
+ 'gslcdflognormalQ',
+ 'gslcdflognormalPinv',
+ 'gslcdflognormalQinv',
+ 'gslcdfgumbel1P',
+ 'gslcdfgumbel1Q',
+ 'gslcdfgumbel1Pinv',
+ 'gslcdfgumbel1Qinv',
+ 'gslcdfgumbel2P',
+ 'gslcdfgumbel2Q',
+ 'gslcdfgumbel2Pinv',
+ 'gslcdfgumbel2Qinv',
+ 'gslcdfweibullP',
+ 'gslcdfweibullQ',
+ 'gslcdfweibullPinv',
+ 'gslcdfweibullQinv',
+ 'gslcdfparetoP',
+ 'gslcdfparetoQ',
+ 'gslcdfparetoPinv',
+ 'gslcdfparetoQinv',
+ 'gslcdflogisticP',
+ 'gslcdflogisticQ',
+ 'gslcdflogisticPinv',
+ 'gslcdflogisticQinv',
+ 'gslcdfbinomialP',
+ 'gslcdfbinomialQ',
+ 'gslcdfpoissonP',
+ 'gslcdfpoissonQ',
+ 'gslcdfgeometricP',
+ 'gslcdfgeometricQ',
+ 'gslcdfnegativebinomialP',
+ 'gslcdfnegativebinomialQ',
+ 'gslcdfpascalP',
+ 'gslcdfpascalQ',
+ 'gslinterpakima',
+ 'gslinterpakimaperiodic',
+ 'gslinterpcsplineperiodic',
+ 'gslinterpcspline',
+ 'gslinterpsteffen',
+ 'gslinterplinear',
+ 'gslinterppolynomial',
+ 'gslranbernoullipdf',
+ 'gslranbeta',
+ 'gslranbetapdf',
+ 'gslranbinomialpdf',
+ 'gslranexponential',
+ 'gslranexponentialpdf',
+ 'gslranexppow',
+ 'gslranexppowpdf',
+ 'gslrancauchy',
+ 'gslrancauchypdf',
+ 'gslranchisq',
+ 'gslranchisqpdf',
+ 'gslranerlang',
+ 'gslranerlangpdf',
+ 'gslranfdist',
+ 'gslranfdistpdf',
+ 'gslranflat',
+ 'gslranflatpdf',
+ 'gslrangamma',
+ 'gslrangammaint',
+ 'gslrangammapdf',
+ 'gslrangammamt',
+ 'gslrangammaknuth',
+ 'gslrangaussian',
+ 'gslrangaussianratiomethod',
+ 'gslrangaussianziggurat',
+ 'gslrangaussianpdf',
+ 'gslranugaussian',
+ 'gslranugaussianratiomethod',
+ 'gslranugaussianpdf',
+ 'gslrangaussiantail',
+ 'gslrangaussiantailpdf',
+ 'gslranugaussiantail',
+ 'gslranugaussiantailpdf',
+ 'gslranlandau',
+ 'gslranlandaupdf',
+ 'gslrangeometricpdf',
+ 'gslrangumbel1',
+ 'gslrangumbel1pdf',
+ 'gslrangumbel2',
+ 'gslrangumbel2pdf',
+ 'gslranlogistic',
+ 'gslranlogisticpdf',
+ 'gslranlognormal',
+ 'gslranlognormalpdf',
+ 'gslranlogarithmicpdf',
+ 'gslrannegativebinomialpdf',
+ 'gslranpascalpdf',
+ 'gslranpareto',
+ 'gslranparetopdf',
+ 'gslranpoissonpdf',
+ 'gslranrayleigh',
+ 'gslranrayleighpdf',
+ 'gslranrayleightail',
+ 'gslranrayleightailpdf',
+ 'gslrantdist',
+ 'gslrantdistpdf',
+ 'gslranlaplace',
+ 'gslranlaplacepdf',
+ 'gslranlevy',
+ 'gslranweibull',
+ 'gslranweibullpdf',
+ 'gslsfairyAi',
+ 'gslsfairyBi',
+ 'gslsfairyAiscaled',
+ 'gslsfairyBiscaled',
+ 'gslsfairyAideriv',
+ 'gslsfairyBideriv',
+ 'gslsfairyAiderivscaled',
+ 'gslsfairyBiderivscaled',
+ 'gslsfairyzeroAi',
+ 'gslsfairyzeroBi',
+ 'gslsfairyzeroAideriv',
+ 'gslsfairyzeroBideriv',
+ 'gslsfbesselJ0',
+ 'gslsfbesselJ1',
+ 'gslsfbesselJn',
+ 'gslsfbesselY0',
+ 'gslsfbesselY1',
+ 'gslsfbesselYn',
+ 'gslsfbesselI0',
+ 'gslsfbesselI1',
+ 'gslsfbesselIn',
+ 'gslsfbesselI0scaled',
+ 'gslsfbesselI1scaled',
+ 'gslsfbesselInscaled',
+ 'gslsfbesselK0',
+ 'gslsfbesselK1',
+ 'gslsfbesselKn',
+ 'gslsfbesselK0scaled',
+ 'gslsfbesselK1scaled',
+ 'gslsfbesselKnscaled',
+ 'gslsfbesselj0',
+ 'gslsfbesselj1',
+ 'gslsfbesselj2',
+ 'gslsfbesseljl',
+ 'gslsfbessely0',
+ 'gslsfbessely1',
+ 'gslsfbessely2',
+ 'gslsfbesselyl',
+ 'gslsfbesseli0scaled',
+ 'gslsfbesseli1scaled',
+ 'gslsfbesseli2scaled',
+ 'gslsfbesselilscaled',
+ 'gslsfbesselk0scaled',
+ 'gslsfbesselk1scaled',
+ 'gslsfbesselk2scaled',
+ 'gslsfbesselklscaled',
+ 'gslsfbesselJnu',
+ 'gslsfbesselYnu',
+ 'gslsfbesselInuscaled',
+ 'gslsfbesselInu',
+ 'gslsfbesselKnuscaled',
+ 'gslsfbesselKnu',
+ 'gslsfbessellnKnu',
+ 'gslsfbesselzeroJ0',
+ 'gslsfbesselzeroJ1',
+ 'gslsfbesselzeroJnu',
+ 'gslsfclausen',
+ 'gslsfhydrogenicR1',
+ 'gslsfdawson',
+ 'gslsfdebye1',
+ 'gslsfdebye2',
+ 'gslsfdebye3',
+ 'gslsfdebye4',
+ 'gslsfdebye5',
+ 'gslsfdebye6',
+ 'gslsfdilog',
+ 'gslsfmultiply',
+ 'gslsfellintKcomp',
+ 'gslsfellintEcomp',
+ 'gslsfellintPcomp',
+ 'gslsfellintDcomp',
+ 'gslsfellintF',
+ 'gslsfellintE',
+ 'gslsfellintRC',
+ 'gslsferfc',
+ 'gslsflogerfc',
+ 'gslsferf',
+ 'gslsferfZ',
+ 'gslsferfQ',
+ 'gslsfhazard',
+ 'gslsfexp',
+ 'gslsfexpmult',
+ 'gslsfexpm1',
+ 'gslsfexprel',
+ 'gslsfexprel2',
+ 'gslsfexpreln',
+ 'gslsfexpintE1',
+ 'gslsfexpintE2',
+ 'gslsfexpintEn',
+ 'gslsfexpintE1scaled',
+ 'gslsfexpintE2scaled',
+ 'gslsfexpintEnscaled',
+ 'gslsfexpintEi',
+ 'gslsfexpintEiscaled',
+ 'gslsfShi',
+ 'gslsfChi',
+ 'gslsfexpint3',
+ 'gslsfSi',
+ 'gslsfCi',
+ 'gslsfatanint',
+ 'gslsffermidiracm1',
+ 'gslsffermidirac0',
+ 'gslsffermidirac1',
+ 'gslsffermidirac2',
+ 'gslsffermidiracint',
+ 'gslsffermidiracmhalf',
+ 'gslsffermidirachalf',
+ 'gslsffermidirac3half',
+ 'gslsffermidiracinc0',
+ 'gslsflngamma',
+ 'gslsfgamma',
+ 'gslsfgammastar',
+ 'gslsfgammainv',
+ 'gslsftaylorcoeff',
+ 'gslsffact',
+ 'gslsfdoublefact',
+ 'gslsflnfact',
+ 'gslsflndoublefact',
+ 'gslsflnchoose',
+ 'gslsfchoose',
+ 'gslsflnpoch',
+ 'gslsfpoch',
+ 'gslsfpochrel',
+ 'gslsfgammaincQ',
+ 'gslsfgammaincP',
+ 'gslsfgammainc',
+ 'gslsflnbeta',
+ 'gslsfbeta',
+ 'gslsfbetainc',
+ 'gslsfgegenpoly1',
+ 'gslsfgegenpoly2',
+ 'gslsfgegenpoly3',
+ 'gslsfgegenpolyn',
+ 'gslsfhyperg0F1',
+ 'gslsfhyperg1F1int',
+ 'gslsfhyperg1F1',
+ 'gslsfhypergUint',
+ 'gslsfhypergU',
+ 'gslsfhyperg2F0',
+ 'gslsflaguerre1',
+ 'gslsflaguerre2',
+ 'gslsflaguerre3',
+ 'gslsflaguerren',
+ 'gslsflambertW0',
+ 'gslsflambertWm1',
+ 'gslsflegendrePl',
+ 'gslsflegendreP1',
+ 'gslsflegendreP2',
+ 'gslsflegendreP3',
+ 'gslsflegendreQ0',
+ 'gslsflegendreQ1',
+ 'gslsflegendreQl',
+ 'gslsflegendrePlm',
+ 'gslsflegendresphPlm',
+ 'gslsflegendrearraysize',
+ 'gslsfconicalPhalf',
+ 'gslsfconicalPmhalf',
+ 'gslsfconicalP0',
+ 'gslsfconicalP1',
+ 'gslsfconicalPsphreg',
+ 'gslsfconicalPcylreg',
+ 'gslsflegendreH3d0',
+ 'gslsflegendreH3d1',
+ 'gslsflegendreH3d',
+ 'gslsflog',
+ 'gslsflogabs',
+ 'gslsflog1plusx',
+ 'gslsflog1plusxmx',
+ 'gslsfpowint',
+ 'gslsfpsiint',
+ 'gslsfpsi',
+ 'gslsfpsi1piy',
+ 'gslsfpsi1int',
+ 'gslsfpsi1',
+ 'gslsfpsin',
+ 'gslsfsynchrotron1',
+ 'gslsfsynchrotron2',
+ 'gslsftransport2',
+ 'gslsftransport3',
+ 'gslsftransport4',
+ 'gslsftransport5',
+ 'gslsfsin',
+ 'gslsfcos',
+ 'gslsfhypot',
+ 'gslsfsinc',
+ 'gslsflnsinh',
+ 'gslsflncosh',
+ 'gslsfanglerestrictsymm',
+ 'gslsfanglerestrictpos',
+ 'gslsfzetaint',
+ 'gslsfzeta',
+ 'gslsfzetam1',
+ 'gslsfzetam1int',
+ 'gslsfhzeta',
+ 'gslsfetaint',
+ 'gslsfeta',
+ 'imag',
+ 'int1d',
+ 'int2d',
+ 'int3d',
+ 'intalledges',
+ 'intallfaces',
+ 'interpolate',
+ 'invdiff',
+ 'invdiffnp',
+ 'invdiffpos',
+ 'Isend',
+ 'isInf',
+ 'isNaN',
+ 'isoline',
+ 'Irecv',
+ 'j0',
+ 'j1',
+ 'jn',
+ 'jump',
+ 'lgamma',
+ 'LinearCG',
+ 'LinearGMRES',
+ 'log',
+ 'log10',
+ 'lrint',
+ 'lround',
+ 'max',
+ 'mean',
+ 'medit',
+ 'min',
+ 'mmg3d',
+ 'movemesh',
+ 'movemesh23',
+ 'mpiAlltoall',
+ 'mpiAlltoallv',
+ 'mpiAllgather',
+ 'mpiAllgatherv',
+ 'mpiAllReduce',
+ 'mpiBarrier',
+ 'mpiGather',
+ 'mpiGatherv',
+ 'mpiRank',
+ 'mpiReduce',
+ 'mpiScatter',
+ 'mpiScatterv',
+ 'mpiSize',
+ 'mpiWait',
+ 'mpiWaitAny',
+ 'mpiWtick',
+ 'mpiWtime',
+ 'mshmet',
+ 'NaN',
+ 'NLCG',
+ 'on',
+ 'plot',
+ 'polar',
+ 'Post',
+ 'pow',
+ 'processor',
+ 'processorblock',
+ 'projection',
+ 'randinit',
+ 'randint31',
+ 'randint32',
+ 'random',
+ 'randreal1',
+ 'randreal2',
+ 'randreal3',
+ 'randres53',
+ 'Read',
+ 'readmesh',
+ 'readmesh3',
+ 'Recv',
+ 'rint',
+ 'round',
+ 'savemesh',
+ 'savesol',
+ 'savevtk',
+ 'seekg',
+ 'Sent',
+ 'set',
+ 'sign',
+ 'signbit',
+ 'sin',
+ 'sinh',
+ 'sort',
+ 'splitComm',
+ 'splitmesh',
+ 'sqrt',
+ 'square',
+ 'srandom',
+ 'srandomdev',
+ 'Stringification',
+ 'swap',
+ 'system',
+ 'tan',
+ 'tanh',
+ 'tellg',
+ 'tetg',
+ 'tetgconvexhull',
+ 'tetgreconstruction',
+ 'tetgtransfo',
+ 'tgamma',
+ 'triangulate',
+ 'trunc',
+ 'Wait',
+ 'Write',
+ 'y0',
+ 'y1',
+ 'yn'
+ }
+
+ # function parameters
+ parameters = {
+ 'A',
+ 'A1',
+ 'abserror',
+ 'absolute',
+ 'aniso',
+ 'aspectratio',
+ 'B',
+ 'B1',
+ 'bb',
+ 'beginend',
+ 'bin',
+ 'boundary',
+ 'bw',
+ 'close',
+ 'cmm',
+ 'coef',
+ 'composante',
+ 'cutoff',
+ 'datafilename',
+ 'dataname',
+ 'dim',
+ 'distmax',
+ 'displacement',
+ 'doptions',
+ 'dparams',
+ 'eps',
+ 'err',
+ 'errg',
+ 'facemerge',
+ 'facetcl',
+ 'factorize',
+ 'file',
+ 'fill',
+ 'fixedborder',
+ 'flabel',
+ 'flags',
+ 'floatmesh',
+ 'floatsol',
+ 'fregion',
+ 'gradation',
+ 'grey',
+ 'hmax',
+ 'hmin',
+ 'holelist',
+ 'hsv',
+ 'init',
+ 'inquire',
+ 'inside',
+ 'IsMetric',
+ 'iso',
+ 'ivalue',
+ 'keepbackvertices',
+ 'label',
+ 'labeldown',
+ 'labelmid',
+ 'labelup',
+ 'levelset',
+ 'loptions',
+ 'lparams',
+ 'maxit',
+ 'maxsubdiv',
+ 'meditff',
+ 'mem',
+ 'memory',
+ 'metric',
+ 'mode',
+ 'nbarrow',
+ 'nbiso',
+ 'nbiter',
+ 'nbjacoby',
+ 'nboffacetcl',
+ 'nbofholes',
+ 'nbofregions',
+ 'nbregul',
+ 'nbsmooth',
+ 'nbvx',
+ 'ncv',
+ 'nev',
+ 'nomeshgeneration',
+ 'normalization',
+ 'omega',
+ 'op',
+ 'optimize',
+ 'option',
+ 'options',
+ 'order',
+ 'orientation',
+ 'periodic',
+ 'power',
+ 'precon',
+ 'prev',
+ 'ps',
+ 'ptmerge',
+ 'qfe',
+ 'qforder',
+ 'qft',
+ 'qfV',
+ 'ratio',
+ 'rawvector',
+ 'reffacelow',
+ 'reffacemid',
+ 'reffaceup',
+ 'refnum',
+ 'reftet',
+ 'reftri',
+ 'region',
+ 'regionlist',
+ 'renumv',
+ 'rescaling',
+ 'ridgeangle',
+ 'save',
+ 'sigma',
+ 'sizeofvolume',
+ 'smoothing',
+ 'solver',
+ 'sparams',
+ 'split',
+ 'splitin2',
+ 'splitpbedge',
+ 'stop',
+ 'strategy',
+ 'swap',
+ 'switch',
+ 'sym',
+ 't',
+ 'tgv',
+ 'thetamax',
+ 'tol',
+ 'tolpivot',
+ 'tolpivotsym',
+ 'transfo',
+ 'U2Vc',
+ 'value',
+ 'varrow',
+ 'vector',
+ 'veps',
+ 'viso',
+ 'wait',
+ 'width',
+ 'withsurfacemesh',
+ 'WindowIndex',
+ 'which',
+ 'zbound'
+ }
+
+ # deprecated
+ deprecated = {'fixeborder'}
+
+ # do not highlight
+ suppress_highlight = {
+ 'alignof',
+ 'asm',
+ 'constexpr',
+ 'decltype',
+ 'div',
+ 'double',
+ 'grad',
+ 'mutable',
+ 'namespace',
+ 'noexcept',
+ 'restrict',
+ 'static_assert',
+ 'template',
+ 'this',
+ 'thread_local',
+ 'typeid',
+ 'typename',
+ 'using'
+ }
+
+ def get_tokens_unprocessed(self, text, stack=('root',)):
+ for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
+ if value in self.operators:
+ yield index, Operator, value
+ elif value in self.types:
+ yield index, Keyword.Type, value
+ elif value in self.fespaces:
+ yield index, Name.Class, value
+ elif value in self.preprocessor:
+ yield index, Comment.Preproc, value
+ elif value in self.keywords:
+ yield index, Keyword.Reserved, value
+ elif value in self.functions:
+ yield index, Name.Function, value
+ elif value in self.parameters:
+ yield index, Keyword.Pseudo, value
+ elif value in self.suppress_highlight:
+ yield index, Name, value
+ else:
+ yield index, token, value
diff --git a/pygments/lexers/func.py b/pygments/lexers/func.py
new file mode 100644
index 0000000..6018f85
--- /dev/null
+++ b/pygments/lexers/func.py
@@ -0,0 +1,108 @@
+"""
+ pygments.lexers.func
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for FunC.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Whitespace, Punctuation
+
+__all__ = ['FuncLexer']
+
+
class FuncLexer(RegexLexer):
    """
    For FunC source code (the smart-contract language of TON).
    """

    name = 'FunC'
    aliases = ['func', 'fc']
    filenames = ['*.fc', '*.func']

    # An identifier:
    # 1. Does not start from "
    # 2. Can start from ` and end with `, containing any character
    # 3. Starts with underscore or { or } and have more than 1 character after it
    # 4. Starts with letter, contains letters, numbers and underscores
    identifier = r'(?!")(`([^`]+)`|((?=_)_|(?=\{)\{|(?=\})\}|(?![_`{}]))([^;,\[\]\(\)\s~.]+))'

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),

            include('keywords'),
            include('strings'),
            include('directives'),
            include('numeric'),
            include('comments'),
            include('storage'),
            include('functions'),
            include('variables'),

            (r'[.;(),\[\]~{}]', Punctuation)
        ],
        'keywords': [
            # Operators in FunC must be whitespace-separated, hence the
            # lookbehind/lookahead guards.
            (words((
                '<=>', '>=', '<=', '!=', '==', '^>>', '~>>',
                '>>', '<<', '/%', '^%', '~%', '^/', '~/', '+=',
                '-=', '*=', '/=', '~/=', '^/=', '%=', '^%=', '<<=',
                '>>=', '~>>=', '^>>=', '&=', '|=', '^=', '^', '=',
                '~', '/', '%', '-', '*', '+', '>',
                '<', '&', '|', ':', '?'), prefix=r'(?<=\s)', suffix=r'(?=\s)'),
             Operator),
            (words((
                'if', 'ifnot',
                'else', 'elseif', 'elseifnot',
                'while', 'do', 'until', 'repeat',
                'return', 'impure', 'method_id',
                'forall', 'asm', 'inline', 'inline_ref'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
        ],
        'directives': [
            (r'#include|#pragma', Keyword, 'directive'),
        ],
        'directive': [
            include('strings'),
            (r'\s+', Whitespace),
            (r'version|not-version', Keyword),
            # Version constraint such as ``>=0.4.1``.  Dots are escaped so
            # only a literal '.' separates the numeric components (the
            # previous unescaped '.' matched any character).
            (r'(>=|<=|=|>|<|\^)?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?', Number),  # version
            (r';', Text, '#pop')
        ],
        'strings': [
            # Double-quoted string with an optional type-suffix letter.
            (r'\"([^\n\"]+)\"[Hhcusa]?', String),
        ],
        'numeric': [
            # Decimal/hex/binary literals with '_' digit separators.
            (r'\b(-?(?!_)([\d_]+|0x[\d_a-fA-F]+)|0b[1_0]+)(?<!_)(?=[\s\)\],;])', Number)
        ],
        'comments': [
            # ';;' single-line comment.  Use the standard Comment.Single
            # token (Comment.Singleline is not a predefined Pygments token
            # and only styles via fallback to Comment).
            (r';;([^\n]*)', Comment.Single),
            (r'\{-', Comment.Multiline, 'comment'),
        ],
        'comment': [
            (r'[^-}{]+', Comment.Multiline),
            (r'\{-', Comment.Multiline, '#push'),  # block comments nest
            (r'-\}', Comment.Multiline, '#pop'),
            (r'[-}{]', Comment.Multiline),
        ],
        'storage': [
            (words((
                'var', 'int', 'slice', 'tuple',
                'cell', 'builder', 'cont', '_'),
                prefix=r'\b', suffix=r'(?=[\s\(\),\[\]])'),
             Keyword.Type),
            (words(('global', 'const'), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
        ],
        'variables': [
            (identifier, Name.Variable),
        ],
        'functions': [
            # identifier followed by (
            (identifier + r'(?=[\(])', Name.Function),
        ]
    }
diff --git a/pygments/lexers/functional.py b/pygments/lexers/functional.py
new file mode 100644
index 0000000..58ba31d
--- /dev/null
+++ b/pygments/lexers/functional.py
@@ -0,0 +1,20 @@
+"""
+ pygments.lexers.functional
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Just export lexer classes previously contained in this module.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
+ NewLispLexer, ShenLexer
+from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
+ KokaLexer
+from pygments.lexers.theorem import CoqLexer
+from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
+ ElixirConsoleLexer, ElixirLexer
+from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
+
+__all__ = []
diff --git a/pygments/lexers/futhark.py b/pygments/lexers/futhark.py
new file mode 100644
index 0000000..95b624d
--- /dev/null
+++ b/pygments/lexers/futhark.py
@@ -0,0 +1,106 @@
+"""
+ pygments.lexers.futhark
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Futhark language
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+from pygments import unistring as uni
+
+__all__ = ['FutharkLexer']
+
+
class FutharkLexer(RegexLexer):
    """
    A Futhark lexer

    .. versionadded:: 2.8
    """
    name = 'Futhark'
    url = 'https://futhark-lang.org/'
    aliases = ['futhark']
    filenames = ['*.fut']
    mimetypes = ['text/x-futhark']

    # Sized numeric types; also used as optional literal suffixes (``1i32``).
    num_types = ('i8', 'i16', 'i32', 'i64', 'u8', 'u16', 'u32', 'u64', 'f32', 'f64')

    other_types = ('bool', )

    reserved = ('if', 'then', 'else', 'def', 'let', 'loop', 'in', 'with',
                'type', 'type~', 'type^',
                'val', 'entry', 'for', 'while', 'do', 'case', 'match',
                'include', 'import', 'module', 'open', 'local', 'assert', '_')

    # Haskell-style ASCII control-character mnemonics accepted in escapes.
    ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
             'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
             'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
             'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')

    # Optional numeric-type suffix appended to literal patterns below.
    num_postfix = r'(%s)?' % '|'.join(num_types)

    # Identifiers may contain primes, as in ML/Haskell.
    identifier_re = '[a-zA-Z_][a-zA-Z_0-9\']*'

    # opstart_re = '+\-\*/%=\!><\|&\^'

    tokens = {
        'root': [
            (r'--(.*?)$', Comment.Single),
            (r'\s+', Whitespace),
            (r'\(\)', Punctuation),
            (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
            (r'\b(%s)(?!\')\b' % '|'.join(num_types + other_types), Keyword.Type),

            # Identifiers
            # Attribute annotations such as ``#[unroll]``.
            (r'#\[([a-zA-Z_\(\) ]*)\]', Comment.Preproc),
            # Possibly qualified (dotted) name, optionally prefixed # or !.
            (r'[#!]?(%s\.)*%s' % (identifier_re, identifier_re), Name),

            (r'\\', Operator),  # lambda
            (r'[-+/%=!><|&*^][-+/%=!><|&*^.]*', Operator),
            (r'[][(),:;`{}?.\'~^]', Punctuation),

            # Numbers
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*_*[pP][+-]?\d(_*\d)*' + num_postfix,
             Number.Float),
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*\.[\da-fA-F](_*[\da-fA-F])*'
             r'(_*[pP][+-]?\d(_*\d)*)?' + num_postfix, Number.Float),
            (r'\d(_*\d)*_*[eE][+-]?\d(_*\d)*' + num_postfix, Number.Float),
            (r'\d(_*\d)*\.\d(_*\d)*(_*[eE][+-]?\d(_*\d)*)?' + num_postfix, Number.Float),
            (r'0[bB]_*[01](_*[01])*' + num_postfix, Number.Bin),
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*' + num_postfix, Number.Hex),
            (r'\d(_*\d)*' + num_postfix, Number.Integer),

            # Character/String Literals
            (r"'", String.Char, 'character'),
            (r'"', String, 'string'),
            # Special
            # Array-size annotation, e.g. ``[n]i32``.
            (r'\[[a-zA-Z_\d]*\]', Keyword.Type),
            # NOTE(review): unreachable — '\(\)' is already matched as
            # Punctuation by the earlier rule in this state.
            (r'\(\)', Name.Builtin),
        ],
        'character': [
            # Allows multi-chars, incorrectly.
            (r"[^\\']'", String.Char, '#pop'),
            (r"\\", String.Escape, 'escape'),
            ("'", String.Char, '#pop'),
        ],
        'string': [
            (r'[^\\"]+', String),
            (r"\\", String.Escape, 'escape'),
            ('"', String, '#pop'),
        ],

        'escape': [
            (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
            # Control-character escape: ``\^X`` for uppercase letters etc.
            (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),
            ('|'.join(ascii), String.Escape, '#pop'),
            (r'o[0-7]+', String.Escape, '#pop'),
            (r'x[\da-fA-F]+', String.Escape, '#pop'),
            (r'\d+', String.Escape, '#pop'),
            # String gap: backslash, whitespace, backslash.
            (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
        ],
    }
diff --git a/pygments/lexers/gcodelexer.py b/pygments/lexers/gcodelexer.py
new file mode 100644
index 0000000..2dd2875
--- /dev/null
+++ b/pygments/lexers/gcodelexer.py
@@ -0,0 +1,35 @@
+"""
+ pygments.lexers.gcodelexer
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the G Code Language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Name, Text, Keyword, Number
+
+__all__ = ['GcodeLexer']
+
+
class GcodeLexer(RegexLexer):
    """
    For gcode source code.

    .. versionadded:: 2.9
    """
    name = 'g-code'
    aliases = ['gcode']
    filenames = ['*.gcode']

    tokens = {
        'root': [
            # ';' starts a comment that runs to end of line.
            (r';.*\n', Comment),
            # NOTE(review): requires a trailing whitespace character, so a
            # G/M word that ends the line is not matched — confirm intended.
            (r'^[gmGM]\d{1,4}\s', Name.Builtin),  # M or G commands
            # Parameter word: an address letter followed by a signed decimal.
            (r'([^gGmM])([+-]?\d*[.]?\d+)', bygroups(Keyword, Number)),
            (r'\s', Text.Whitespace),
            # Fallback: consume the rest of the line.
            (r'.*\n', Text),
        ]
    }
diff --git a/pygments/lexers/gdscript.py b/pygments/lexers/gdscript.py
new file mode 100644
index 0000000..0bfb43f
--- /dev/null
+++ b/pygments/lexers/gdscript.py
@@ -0,0 +1,188 @@
+"""
+ pygments.lexers.gdscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for GDScript.
+
+ Modified by Daniel J. Ramirez <djrmuv@gmail.com> based on the original
+ python.py.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, default, words, \
+ combined
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ["GDScriptLexer"]
+
+
class GDScriptLexer(RegexLexer):
    """
    For GDScript source code.
    """

    name = "GDScript"
    url = 'https://www.godotengine.org'
    aliases = ["gdscript", "gd"]
    filenames = ["*.gd"]
    mimetypes = ["text/x-gdscript", "application/x-gdscript"]

    def innerstring_rules(ttype):
        # Helper run at class-body time to build the shared string-interior
        # rules for single- and double-quoted strings.
        return [
            # the old style '%s' % (...) string formatting
            (r"%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?"
             "[hlL]?[E-GXc-giorsux%]",
             String.Interpol),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r"%", ttype),
            # newlines are an error (use "nl" state)
        ]

    tokens = {
        "root": [
            (r"\n", Whitespace),
            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"[^\S\n]+", Whitespace),
            (r"#.*$", Comment.Single),
            (r"[]{}:(),;[]", Punctuation),
            (r"(\\)(\n)", bygroups(Text, Whitespace)),
            (r"\\", Text),
            (r"(in|and|or|not)\b", Operator.Word),
            (r"!=|==|<<|>>|&&|\+=|-=|\*=|/=|%=|&=|\|=|\|\||[-~+/*%=<>&^.!|$]",
             Operator),
            include("keywords"),
            (r"(func)(\s+)", bygroups(Keyword, Whitespace), "funcname"),
            (r"(class)(\s+)", bygroups(Keyword, Whitespace), "classname"),
            include("builtins"),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
             bygroups(String.Affix, String.Double),
             "tdqs"),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
             bygroups(String.Affix, String.Single),
             "tsqs"),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
             bygroups(String.Affix, String.Double),
             "dqs"),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
             bygroups(String.Affix, String.Single),
             "sqs"),
            ('([uUbB]?)(""")',
             bygroups(String.Affix, String.Double),
             combined("stringescape", "tdqs")),
            ("([uUbB]?)(''')",
             bygroups(String.Affix, String.Single),
             combined("stringescape", "tsqs")),
            ('([uUbB]?)(")',
             bygroups(String.Affix, String.Double),
             combined("stringescape", "dqs")),
            ("([uUbB]?)(')",
             bygroups(String.Affix, String.Single),
             combined("stringescape", "sqs")),
            include("name"),
            include("numbers"),
        ],
        "keywords": [
            (words(("and", "in", "not", "or", "as", "breakpoint", "class",
                    "class_name", "extends", "is", "func", "setget", "signal",
                    "tool", "const", "enum", "export", "onready", "static",
                    "var", "break", "continue", "if", "elif", "else", "for",
                    "pass", "return", "match", "while", "remote", "master",
                    "puppet", "remotesync", "mastersync", "puppetsync"),
                   suffix=r"\b"), Keyword),
        ],
        "builtins": [
            # Built-in global functions.  Fix: the word list previously
            # contained a duplicate "tan" entry.
            (words(("Color8", "ColorN", "abs", "acos", "asin", "assert", "atan",
                    "atan2", "bytes2var", "ceil", "char", "clamp", "convert",
                    "cos", "cosh", "db2linear", "decimals", "dectime", "deg2rad",
                    "dict2inst", "ease", "exp", "floor", "fmod", "fposmod",
                    "funcref", "hash", "inst2dict", "instance_from_id", "is_inf",
                    "is_nan", "lerp", "linear2db", "load", "log", "max", "min",
                    "nearest_po2", "pow", "preload", "print", "print_stack",
                    "printerr", "printraw", "prints", "printt", "rad2deg",
                    "rand_range", "rand_seed", "randf", "randi", "randomize",
                    "range", "round", "seed", "sign", "sin", "sinh", "sqrt",
                    "stepify", "str", "str2var", "tan", "tanh",
                    "type_exist", "typeof", "var2bytes", "var2str", "weakref",
                    "yield"), prefix=r"(?<!\.)", suffix=r"\b"),
             Name.Builtin),
            (r"((?<!\.)(self|false|true)|(PI|TAU|NAN|INF)" r")\b",
             Name.Builtin.Pseudo),
            # Built-in types.  Fix: "NodePath" was listed twice.
            (words(("bool", "int", "float", "String", "NodePath", "Vector2",
                    "Rect2", "Transform2D", "Vector3", "Rect3", "Plane", "Quat",
                    "Basis", "Transform", "Color", "RID", "Object",
                    "Dictionary", "Array", "PackedByteArray", "PackedInt32Array",
                    "PackedInt64Array", "PackedFloat32Array", "PackedFloat64Array",
                    "PackedStringArray", "PackedVector2Array", "PackedVector3Array",
                    "PackedColorArray", "null", "void"),
                   prefix=r"(?<!\.)", suffix=r"\b"),
             Name.Builtin.Type),
        ],
        "numbers": [
            (r"(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?", Number.Float),
            (r"\d+[eE][+-]?[0-9]+j?", Number.Float),
            (r"0[xX][a-fA-F0-9]+", Number.Hex),
            (r"\d+j?", Number.Integer),
        ],
        "name": [(r"[a-zA-Z_]\w*", Name)],
        "funcname": [(r"[a-zA-Z_]\w*", Name.Function, "#pop"), default("#pop")],
        "classname": [(r"[a-zA-Z_]\w*", Name.Class, "#pop")],
        "stringescape": [
            (
                r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
                r"U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})",
                String.Escape,
            )
        ],
        "strings-single": innerstring_rules(String.Single),
        "strings-double": innerstring_rules(String.Double),
        "dqs": [
            (r'"', String.Double, "#pop"),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include("strings-double"),
        ],
        "sqs": [
            (r"'", String.Single, "#pop"),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include("strings-single"),
        ],
        "tdqs": [
            (r'"""', String.Double, "#pop"),
            include("strings-double"),
            (r"\n", Whitespace),
        ],
        "tsqs": [
            (r"'''", String.Single, "#pop"),
            include("strings-single"),
            (r"\n", Whitespace),
        ],
    }

    def analyse_text(text):
        """Heuristic score in [0, 1] for how likely *text* is GDScript."""
        score = 0.0

        # Engine lifecycle callbacks are a strong signal.
        if re.search(
            r"func (_ready|_init|_input|_process|_unhandled_input)", text
        ):
            score += 0.8

        if re.search(
            r"(extends |class_name |onready |preload|load|setget|func [^_])",
            text
        ):
            score += 0.4

        # Weak signal: common declaration keywords.
        if re.search(r"(var|const|enum|export|signal|tool)", text):
            score += 0.2

        return min(score, 1.0)
diff --git a/pygments/lexers/go.py b/pygments/lexers/go.py
new file mode 100644
index 0000000..8b7d869
--- /dev/null
+++ b/pygments/lexers/go.py
@@ -0,0 +1,98 @@
+"""
+ pygments.lexers.go
+ ~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Google Go language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['GoLexer']
+
+
class GoLexer(RegexLexer):
    """
    For Go source.

    .. versionadded:: 1.2
    """
    name = 'Go'
    url = 'https://go.dev/'
    filenames = ['*.go']
    aliases = ['go', 'golang']
    mimetypes = ['text/x-gosrc']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuations
            (r'//(.*?)$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b',
             Keyword.Declaration),
            (words((
                'break', 'default', 'select', 'case', 'defer', 'go',
                'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
                'continue', 'for', 'return'), suffix=r'\b'),
             Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            # First: builtin name immediately followed by '(' -> call form.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr', 'any', 'comparable',
                'print', 'println', 'panic', 'recover', 'close', 'complex',
                'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
                'new', 'make'), suffix=r'\b(\()'),
             bygroups(Name.Builtin, Punctuation)),
            # Second: bare builtin type name -> type form.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr', 'any', 'comparable'), suffix=r'\b'),
             Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&]'
             r'|~|\|)', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
diff --git a/pygments/lexers/grammar_notation.py b/pygments/lexers/grammar_notation.py
new file mode 100644
index 0000000..4be4144
--- /dev/null
+++ b/pygments/lexers/grammar_notation.py
@@ -0,0 +1,265 @@
+"""
+ pygments.lexers.grammar_notation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for grammar notations like BNF.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, this, using, words
+from pygments.token import Comment, Keyword, Literal, Name, Number, \
+ Operator, Punctuation, String, Text, Whitespace
+
+__all__ = ['BnfLexer', 'AbnfLexer', 'JsgfLexer', 'PegLexer']
+
+
class BnfLexer(RegexLexer):
    """
    This lexer is for grammar notations which are similar to
    original BNF.

    In order to maximize a number of targets of this lexer,
    let's decide some designs:

    * We don't distinguish `Terminal Symbol`.

    * We do assume that `NonTerminal Symbol` are always enclosed
      with arrow brackets.

    * We do assume that `NonTerminal Symbol` may include
      any printable characters except arrow brackets and ASCII 0x20.
      This assumption is for `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_.

    * We do assume that target notation doesn't support comment.

    * We don't distinguish any operators and punctuation except
      `::=`.

    Though these decision making might cause too minimal highlighting
    and you might be disappointed, but it is reasonable for us.

    .. versionadded:: 2.1
    """

    name = 'BNF'
    aliases = ['bnf']
    filenames = ['*.bnf']
    mimetypes = ['text/x-bnf']

    tokens = {
        'root': [
            # Nonterminal: printable chars (minus '<'/'>') inside brackets.
            (r'(<)([ -;=?-~]+)(>)',
             bygroups(Punctuation, Name.Class, Punctuation)),

            # an only operator
            (r'::=', Operator),

            # fallback
            (r'[^<>:]+', Text),  # for performance
            (r'.', Text),
        ],
    }
+
+
class AbnfLexer(RegexLexer):
    """
    Lexer for IETF 7405 ABNF.

    (Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_) grammars.

    .. versionadded:: 2.1
    """

    name = 'ABNF'
    url = 'http://www.ietf.org/rfc/rfc7405.txt'
    aliases = ['abnf']
    filenames = ['*.abnf']
    mimetypes = ['text/x-abnf']

    # Core rules defined by RFC 5234 appendix B.1.
    _core_rules = (
        'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
        'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
        'SP', 'VCHAR', 'WSP')

    tokens = {
        'root': [
            # comment
            (r';.*$', Comment.Single),

            # quoted
            # double quote itself in this state, it is as '%x22'.
            (r'(%[si])?"[^"]*"', Literal),

            # binary (but i have never seen...)
            (r'%b[01]+\-[01]+\b', Literal),  # range
            (r'%b[01]+(\.[01]+)*\b', Literal),  # concat

            # decimal
            (r'%d[0-9]+\-[0-9]+\b', Literal),  # range
            (r'%d[0-9]+(\.[0-9]+)*\b', Literal),  # concat

            # hexadecimal
            (r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal),  # range
            (r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal),  # concat

            # repetition (<a>*<b>element) including nRule
            (r'\b[0-9]+\*[0-9]+', Operator),
            (r'\b[0-9]+\*', Operator),
            (r'\b[0-9]+', Operator),
            (r'\*', Operator),

            # Strictly speaking, these are not keyword but
            # are called `Core Rule'.
            (words(_core_rules, suffix=r'\b'), Keyword),

            # nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
            (r'[a-zA-Z][a-zA-Z0-9-]*\b', Name.Class),

            # operators
            (r'(=/|=|/)', Operator),

            # punctuation
            (r'[\[\]()]', Punctuation),

            # fallback
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }
+
+
class JsgfLexer(RegexLexer):
    """
    For JSpeech Grammar Format grammars.

    .. versionadded:: 2.2
    """
    name = 'JSGF'
    url = 'https://www.w3.org/TR/jsgf/'
    aliases = ['jsgf']
    filenames = ['*.jsgf']
    mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf']

    tokens = {
        'root': [
            include('comments'),
            include('non-comments'),
        ],
        'comments': [
            # '/**' opens a documentation comment (but '/**/' is ordinary).
            (r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*$', Comment.Single),
        ],
        'non-comments': [
            # Self-identifying header, only valid at the start of the file.
            (r'\A#JSGF[^;]*', Comment.Preproc),
            (r'\s+', Whitespace),
            (r';', Punctuation),
            (r'[=|()\[\]*+]', Operator),
            # Weight, e.g. /3.14/.
            (r'/[^/]+/', Number.Float),
            (r'"', String.Double, 'string'),
            (r'\{', String.Other, 'tag'),
            (words(('import', 'public'), suffix=r'\b'), Keyword.Reserved),
            (r'grammar\b', Keyword.Reserved, 'grammar name'),
            (r'(<)(NULL|VOID)(>)',
             bygroups(Punctuation, Name.Builtin, Punctuation)),
            (r'<', Punctuation, 'rulename'),
            (r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text),
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            (r'\\.', String.Escape),
            (r'[^\\"]+', String.Double),
        ],
        'tag': [
            (r'\}', String.Other, '#pop'),
            (r'\\.', String.Escape),
            (r'[^\\}]+', String.Other),
        ],
        'grammar name': [
            (r';', Punctuation, '#pop'),
            (r'\s+', Whitespace),
            (r'\.', Punctuation),
            (r'[^;\s.]+', Name.Namespace),
        ],
        'rulename': [
            (r'>', Punctuation, '#pop'),
            (r'\*', Punctuation),
            (r'\s+', Whitespace),
            # Package-qualified rule reference: everything before the last
            # dot is the namespace.
            (r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)),
            (r'[^.>]+', Name.Constant),
        ],
        'documentation comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            # '@example' / '@see' tags: highlight their payload as grammar.
            (r'^(\s*)(\*?)(\s*)(@(?:example|see))(\s+)'
             r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))',
             bygroups(Whitespace, Comment.Multiline, Whitespace, Comment.Special,
                      Whitespace, using(this, state='example'))),
            (r'(^\s*\*?\s*)(@\S*)',
             bygroups(Comment.Multiline, Comment.Special)),
            (r'[^*\n@]+|\w|\W', Comment.Multiline),
        ],
        'example': [
            (r'(\n\s*)(\*)', bygroups(Whitespace, Comment.Multiline)),
            include('non-comments'),
            (r'.', Comment.Multiline),
        ],
    }
+
+
class PegLexer(RegexLexer):
    """
    This lexer is for Parsing Expression Grammars (PEG).

    Various implementations of PEG have made different decisions
    regarding the syntax, so let's try to be accommodating:

    * `<-`, `←`, `:`, and `=` are all accepted as rule operators.

    * Both `|` and `/` are choice operators.

    * `^`, `↑`, and `~` are cut operators.

    * A single `a-z` character immediately before a string, or
      multiple `a-z` characters following a string, are part of the
      string (e.g., `r"..."` or `"..."ilmsuxa`).

    .. versionadded:: 2.6
    """

    name = 'PEG'
    url = 'https://bford.info/pub/lang/peg.pdf'
    aliases = ['peg']
    filenames = ['*.peg']
    mimetypes = ['text/x-peg']

    tokens = {
        'root': [
            # Comments
            (r'#.*$', Comment.Single),

            # All operators
            (r'<-|[←:=/|&!?*+^↑~]', Operator),

            # Other punctuation
            (r'[()]', Punctuation),

            # Keywords
            # '.' is the any-character expression.
            (r'\.', Keyword),

            # Character classes
            (r'(\[)([^\]]*(?:\\.[^\]\\]*)*)(\])',
             bygroups(Punctuation, String, Punctuation)),

            # Single and double quoted strings (with optional modifiers)
            (r'[a-z]?"[^"\\]*(?:\\.[^"\\]*)*"[a-z]*', String.Double),
            (r"[a-z]?'[^'\\]*(?:\\.[^'\\]*)*'[a-z]*", String.Single),

            # Nonterminals are not whitespace, operators, or punctuation
            (r'[^\s<←:=/|&!?*+\^↑~()\[\]"\'#]+', Name.Class),

            # Fallback
            (r'.', Text),
        ],
    }
diff --git a/pygments/lexers/graph.py b/pygments/lexers/graph.py
new file mode 100644
index 0000000..69e5bf1
--- /dev/null
+++ b/pygments/lexers/graph.py
@@ -0,0 +1,105 @@
+"""
+ pygments.lexers.graph
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for graph query languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, words
+from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
+ String, Number, Whitespace
+
+
+__all__ = ['CypherLexer']
+
+
class CypherLexer(RegexLexer):
    """
    For Cypher Query Language

    For the Cypher version in Neo4j 3.3

    .. versionadded:: 2.0
    """
    name = 'Cypher'
    url = 'https://neo4j.com/docs/developer-manual/3.3/cypher/'
    aliases = ['cypher']
    filenames = ['*.cyp', '*.cypher']

    # Cypher keywords are case-insensitive.
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            include('comment'),
            include('clauses'),
            include('keywords'),
            include('relations'),
            include('strings'),
            include('whitespace'),
            include('barewords'),
        ],
        'comment': [
            (r'^.*//.*$', Comment.Single),
        ],
        'keywords': [
            (r'(create|order|match|limit|set|skip|start|return|with|where|'
             r'delete|foreach|not|by|true|false)\b', Keyword),
        ],
        'clauses': [
            # based on https://neo4j.com/docs/cypher-refcard/3.3/
            (r'(create)(\s+)(index|unique)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            # Fix: was misspelled 'contraint', which never matched the
            # actual DROP CONSTRAINT clause.
            (r'(drop)(\s+)(constraint|index)(\s+)(on)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(ends)(\s+)(with)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(is)(\s+)(node)(\s+)(key)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(is)(\s+)(null|unique)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(load)(\s+)(csv)(\s+)(from)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(on)(\s+)(match|create)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(optional)(\s+)(match)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(order)(\s+)(by)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(starts)(\s+)(with)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(union)(\s+)(all)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(using)(\s+)(periodic)(\s+)(commit)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (words((
                'all', 'any', 'as', 'asc', 'ascending', 'assert', 'call', 'case', 'create',
                'delete', 'desc', 'descending', 'distinct', 'end', 'fieldterminator',
                'foreach', 'in', 'limit', 'match', 'merge', 'none', 'not', 'null',
                'remove', 'return', 'set', 'skip', 'single', 'start', 'then', 'union',
                'unwind', 'yield', 'where', 'when', 'with'), suffix=r'\b'), Keyword),
        ],
        'relations': [
            # Relationship patterns; the bracketed interior is re-lexed
            # with this same lexer.
            (r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
            (r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
            (r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
            (r'-->|<--|\[|\]', Operator),
            (r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
            (r'[.*{}]', Punctuation),
        ],
        'strings': [
            (r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
            # Backtick-quoted identifiers; '``' is an escaped backtick.
            (r'`(?:``|[^`])+`', Name.Variable),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'barewords': [
            (r'[a-z]\w*', Name),
            (r'\d+', Number),
        ],
    }
diff --git a/pygments/lexers/graphics.py b/pygments/lexers/graphics.py
new file mode 100644
index 0000000..95e56d0
--- /dev/null
+++ b/pygments/lexers/graphics.py
@@ -0,0 +1,797 @@
+"""
+ pygments.lexers.graphics
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for computer graphics and plotting related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include, bygroups, using, \
+ this, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, \
+ Number, Punctuation, String, Whitespace
+
+__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
+ 'PovrayLexer', 'HLSLShaderLexer']
+
+
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.

    .. versionadded:: 1.1
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']

    # Single flat state: GLSL has no string literals, so everything is
    # lexed from 'root'.  The keyword/type tables are listed before the
    # generic identifier fallback near the end.
    tokens = {
        'root': [
            (r'^#.*$', Comment.Preproc),
            (r'//.*$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator),  # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            # FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            (words((
                # Storage qualifiers
                'attribute', 'const', 'uniform', 'varying',
                'buffer', 'shared', 'in', 'out',
                # Layout qualifiers
                'layout',
                # Interpolation qualifiers
                'flat', 'smooth', 'noperspective',
                # Auxiliary qualifiers
                'centroid', 'sample', 'patch',
                # Parameter qualifiers. Some double as Storage qualifiers
                'inout',
                # Precision qualifiers
                'lowp', 'mediump', 'highp', 'precision',
                # Invariance qualifiers
                'invariant',
                # Precise qualifiers
                'precise',
                # Memory qualifiers
                'coherent', 'volatile', 'restrict', 'readonly', 'writeonly',
                # Statements
                'break', 'continue', 'do', 'for', 'while', 'switch',
                'case', 'default', 'if', 'else', 'subroutine',
                'discard', 'return', 'struct'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                # Boolean values
                'true', 'false'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Constant),
            (words((
                # Miscellaneous types
                'void', 'atomic_uint',
                # Floating-point scalars and vectors
                'float', 'vec2', 'vec3', 'vec4',
                'double', 'dvec2', 'dvec3', 'dvec4',
                # Integer scalars and vectors
                'int', 'ivec2', 'ivec3', 'ivec4',
                'uint', 'uvec2', 'uvec3', 'uvec4',
                # Boolean scalars and vectors
                'bool', 'bvec2', 'bvec3', 'bvec4',
                # Matrices
                'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4',
                'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4',
                'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3',
                'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4',
                # Floating-point samplers
                'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
                'sampler1DArray', 'sampler2DArray', 'samplerCubeArray',
                'sampler2DRect', 'samplerBuffer',
                'sampler2DMS', 'sampler2DMSArray',
                # Shadow samplers
                'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow',
                'sampler1DArrayShadow', 'sampler2DArrayShadow',
                'samplerCubeArrayShadow', 'sampler2DRectShadow',
                # Signed integer samplers
                'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube',
                'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray',
                'isampler2DRect', 'isamplerBuffer',
                'isampler2DMS', 'isampler2DMSArray',
                # Unsigned integer samplers
                'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube',
                'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray',
                'usampler2DRect', 'usamplerBuffer',
                'usampler2DMS', 'usampler2DMSArray',
                # Floating-point image types
                'image1D', 'image2D', 'image3D', 'imageCube',
                'image1DArray', 'image2DArray', 'imageCubeArray',
                'image2DRect', 'imageBuffer',
                'image2DMS', 'image2DMSArray',
                # Signed integer image types
                'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube',
                'iimage1DArray', 'iimage2DArray', 'iimageCubeArray',
                'iimage2DRect', 'iimageBuffer',
                'iimage2DMS', 'iimage2DMSArray',
                # Unsigned integer image types
                'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube',
                'uimage1DArray', 'uimage2DArray', 'uimageCubeArray',
                'uimage2DRect', 'uimageBuffer',
                'uimage2DMS', 'uimage2DMSArray'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Type),
            (words((
                # Reserved for future use.
                'common', 'partition', 'active', 'asm', 'class',
                'union', 'enum', 'typedef', 'template', 'this',
                'resource', 'goto', 'inline', 'noinline', 'public',
                'static', 'extern', 'external', 'interface', 'long',
                'short', 'half', 'fixed', 'unsigned', 'superp', 'input',
                'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3',
                'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast',
                'namespace', 'using'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # All names beginning with "gl_" are reserved.
            (r'gl_\w*', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),
            (r'\.', Punctuation),
            (r'\s+', Whitespace),
        ],
    }
+
+
class HLSLShaderLexer(RegexLexer):
    """
    HLSL (Microsoft Direct3D Shader) lexer.

    .. versionadded:: 2.3
    """
    name = 'HLSL'
    aliases = ['hlsl']
    filenames = ['*.hlsl', '*.hlsli']
    mimetypes = ['text/x-hlsl']

    # Unlike GLSL, HLSL has string literals, handled in the separate
    # 'string' state below.
    tokens = {
        'root': [
            (r'^#.*$', Comment.Preproc),
            (r'//.*$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator),  # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),.\[\]]', Punctuation),
            # FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            (r'"', String, 'string'),
            (words((
                'asm','asm_fragment','break','case','cbuffer','centroid','class',
                'column_major','compile','compile_fragment','const','continue',
                'default','discard','do','else','export','extern','for','fxgroup',
                'globallycoherent','groupshared','if','in','inline','inout',
                'interface','line','lineadj','linear','namespace','nointerpolation',
                'noperspective','NULL','out','packoffset','pass','pixelfragment',
                'point','precise','return','register','row_major','sample',
                'sampler','shared','stateblock','stateblock_state','static',
                'struct','switch','tbuffer','technique','technique10',
                'technique11','texture','typedef','triangle','triangleadj',
                'uniform','vertexfragment','volatile','while'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words(('true','false'), prefix=r'\b', suffix=r'\b'),
             Keyword.Constant),
            (words((
                'auto','catch','char','const_cast','delete','dynamic_cast','enum',
                'explicit','friend','goto','long','mutable','new','operator',
                'private','protected','public','reinterpret_cast','short','signed',
                'sizeof','static_cast','template','this','throw','try','typename',
                'union','unsigned','using','virtual'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            (words((
                'dword','matrix','snorm','string','unorm','unsigned','void','vector',
                'BlendState','Buffer','ByteAddressBuffer','ComputeShader',
                'DepthStencilState','DepthStencilView','DomainShader',
                'GeometryShader','HullShader','InputPatch','LineStream',
                'OutputPatch','PixelShader','PointStream','RasterizerState',
                'RenderTargetView','RasterizerOrderedBuffer',
                'RasterizerOrderedByteAddressBuffer',
                'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D',
                'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D',
                'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D',
                'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer',
                'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray',
                'RWTexture3D','SamplerState','SamplerComparisonState',
                'StructuredBuffer','Texture1D','Texture1DArray','Texture2D',
                'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D',
                'TextureCube','TextureCubeArray','TriangleStream','VertexShader'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Type),
            (words((
                'bool','double','float','int','half','min16float','min10float',
                'min16int','min12int','min16uint','uint'),
                prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'),
             Keyword.Type),  # vector and matrix types
            (words((
                'abort','abs','acos','all','AllMemoryBarrier',
                'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer',
                'asdouble','asfloat','asin','asint','asuint','asuint','atan',
                'atan2','ceil','CheckAccessFullyMapped','clamp','clip',
                'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits',
                'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy',
                'ddy_coarse','ddy_fine','degrees','determinant',
                'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance',
                'dot','dst','errorf','EvaluateAttributeAtCentroid',
                'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp',
                'exp2','f16tof32','f32tof16','faceforward','firstbithigh',
                'firstbitlow','floor','fma','fmod','frac','frexp','fwidth',
                'GetRenderTargetSampleCount','GetRenderTargetSamplePosition',
                'GlobalOrderedCountIncrement','GroupMemoryBarrier',
                'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd',
                'InterlockedCompareExchange','InterlockedCompareStore',
                'InterlockedExchange','InterlockedMax','InterlockedMin',
                'InterlockedOr','InterlockedXor','isfinite','isinf','isnan',
                'ldexp','length','lerp','lit','log','log10','log2','mad','max',
                'min','modf','msad4','mul','noise','normalize','pow','printf',
                'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax',
                'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors',
                'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax',
                'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg',
                'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin',
                'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp',
                'reflect','refract','reversebits','round','rsqrt','saturate',
                'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan',
                'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod',
                'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod',
                'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod',
                'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad',
                'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd',
                'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor',
                'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue',
                'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex',
                'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce',
                'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane',
                'WaveReadLaneAt'),
                prefix=r'\b', suffix=r'\b'),
             Name.Builtin),  # built-in functions
            (words((
                'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1',
                'SV_Culldistance','SV_CullDistance0','SV_CullDistance1',
                'SV_Coverage','SV_Depth','SV_DepthGreaterEqual',
                'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation',
                'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID',
                'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID',
                'SV_IsFrontFace','SV_OutputControlPointID','SV_Position',
                'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex',
                'SV_StencilRef','SV_TessFactor','SV_VertexID',
                'SV_ViewportArrayIndex'),
                prefix=r'\b', suffix=r'\b'),
             Name.Decorator),  # system-value semantics
            (r'\bSV_Target[0-7]?\b', Name.Decorator),
            (words((
                'allow_uav_condition','branch','call','domain','earlydepthstencil',
                'fastopt','flatten','forcecase','instance','loop','maxtessfactor',
                'numthreads','outputcontrolpoints','outputtopology','partitioning',
                'patchconstantfunc','unroll'),
                prefix=r'\b', suffix=r'\b'),
             Name.Decorator),  # attributes
            (r'[a-zA-Z_]\w*', Name),
            (r'\\$', Comment.Preproc),  # backslash at end of line -- usually macro continuation
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }
+
+
class PostScriptLexer(RegexLexer):
    """
    Lexer for PostScript files.

    .. versionadded:: 1.4
    """
    name = 'PostScript'
    url = 'https://en.wikipedia.org/wiki/PostScript'
    aliases = ['postscript', 'postscr']
    filenames = ['*.ps', '*.eps']
    mimetypes = ['application/postscript']

    # PostScript tokens run until one of these delimiter characters;
    # delimiter_end is a lookahead used below to terminate names/numbers
    # without consuming the delimiter itself.
    delimiter = r'()<>\[\]{}/%\s'
    delimiter_end = r'(?=[%s])' % delimiter

    valid_name_chars = r'[^%s]' % delimiter
    valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)

    tokens = {
        'root': [
            # All comment types
            (r'^%!.+$', Comment.Preproc),
            (r'%%.*$', Comment.Special),
            (r'(^%.*\n){2,}', Comment.Multiline),
            (r'%.*$', Comment.Single),

            # String literals are awkward; enter separate state.
            (r'\(', String, 'stringliteral'),

            (r'[{}<>\[\]]', Punctuation),

            # Numbers
            (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
            # Slight abuse: use Oct to signify any explicit base system
            (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
             r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
            (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
             + delimiter_end, Number.Float),
            (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),

            # References
            (r'\/%s' % valid_name, Name.Variable),

            # Names
            (valid_name, Name.Function),  # Anything else is executed

            # These keywords taken from
            # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
            # Is there an authoritative list anywhere that doesn't involve
            # trawling documentation?

            (r'(false|true)' + delimiter_end, Keyword.Constant),

            # Conditionals / flow control
            (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
             + delimiter_end, Keyword.Reserved),

            (words((
                'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
                'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
                'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
                'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
                'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
                'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
                'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
                'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
                'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
                'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
                'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
                'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
                'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
                'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
                'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
                'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
                'transform', 'translate', 'truncate', 'typecheck', 'undefined',
                'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
             Name.Builtin),

            (r'\s+', Whitespace),
        ],

        'stringliteral': [
            (r'[^()\\]+', String),
            (r'\\', String.Escape, 'escape'),
            # Parentheses nest inside PostScript strings; push/pop to
            # keep the nesting balanced.
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],

        'escape': [
            (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
            default('#pop'),
        ],
    }
+
+
class AsymptoteLexer(RegexLexer):
    """
    For Asymptote source code.

    .. versionadded:: 1.2
    """
    name = 'Asymptote'
    url = 'http://asymptote.sf.net/'
    aliases = ['asymptote', 'asy']
    filenames = ['*.asy']
    mimetypes = ['text/x-asymptote']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'

    tokens = {
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            # simple string (TeX friendly)
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # C style string (with character escapes)
            (r"'", String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
             r'return|break|continue|struct|typedef|new|access|import|'
             r'unravel|from|include|quote|static|public|private|restricted|'
             r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can be also an asy-function-name,
            # in the following we test if the string " [a-zA-Z]" follows
            # the Keyword.Type.
            # Of course it is not perfect !
            (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
             r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
             r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
             r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
             r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
             r'path3|pen|picture|point|position|projection|real|revolution|'
             r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
             r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
             r'transformation|tree|triangle|trilinear|triple|vector|'
             r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-name which are not asy-function-name
            # except yours !
            # Perhaps useless
            (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
             r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
             r'picture|position|real|revolution|slice|splitface|ticksgridT|'
             r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
            (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'\n', String),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\n', String),   # backslash followed by a literal 'n'
            (r'\\', String),    # stray backslash
        ],
    }

    def get_tokens_unprocessed(self, text):
        # Post-process the regex stream: reclassify plain Name tokens
        # that match the known Asymptote builtin function/variable lists.
        from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in ASYFUNCNAME:
                token = Name.Function
            elif token is Name and value in ASYVARNAME:
                token = Name.Variable
            yield index, token, value
+
+
+def _shortened(word):
+ dpos = word.find('$')
+ return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
+ for i in range(len(word), dpos, -1))
+
+
+def _shortened_many(*words):
+ return '|'.join(map(_shortened, words))
+
+
class GnuplotLexer(RegexLexer):
    """
    For Gnuplot plotting scripts.

    .. versionadded:: 0.11
    """

    name = 'Gnuplot'
    url = 'http://gnuplot.info/'
    aliases = ['gnuplot']
    filenames = ['*.plot', '*.plt']
    mimetypes = ['text/x-gnuplot']

    # Command words below use the '$' abbreviation marker expanded by
    # _shortened()/_shortened_many(): e.g. 'bi$nd' matches the gnuplot
    # abbreviations 'bind', 'bin' and 'bi'.
    tokens = {
        'root': [
            include('whitespace'),
            (_shortened('bi$nd'), Keyword, 'bind'),
            (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
            (_shortened('f$it'), Keyword, 'fit'),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
            (r'else\b', Keyword),
            (_shortened('pa$use'), Keyword, 'pause'),
            (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
            (_shortened('sa$ve'), Keyword, 'save'),
            (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
            (_shortened_many('sh$ow', 'uns$et'),
             Keyword, ('noargs', 'optionarg')),
            (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
                             'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
                             'pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'sy$stem', 'up$date'),
             Keyword, 'genericargs'),
            (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'test$'),
             Keyword, 'noargs'),
            (r'([a-zA-Z_]\w*)(\s*)(=)',
             bygroups(Name.Variable, Whitespace, Operator), 'genericargs'),
            (r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
             bygroups(Name.Function, Whitespace, Operator), 'genericargs'),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r';', Keyword),
        ],
        'comment': [
            (r'[^\\\n]', Comment),
            (r'\\\n', Comment),
            (r'\\', Comment),
            # don't add the newline to the Comment token
            default('#pop'),
        ],
        'whitespace': [
            ('#', Comment, 'comment'),
            (r'[ \t\v\f]+', Whitespace),
        ],
        'noargs': [
            include('whitespace'),
            # semicolon and newline end the argument list
            (r';', Punctuation, '#pop'),
            (r'\n', Whitespace, '#pop'),
        ],
        'dqstring': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
            (r'\n', Whitespace, '#pop'),  # newline ends the string too
        ],
        'sqstring': [
            (r"''", String),  # escaped single quote
            (r"'", String, '#pop'),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # normal backslash
            (r'\n', Whitespace, '#pop'),  # newline ends the string too
        ],
        'genericargs': [
            include('noargs'),
            (r'"', String, 'dqstring'),
            (r"'", String, 'sqstring'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'-?\d+', Number.Integer),
            ('[,.~!%^&*+=|?:<>/-]', Operator),
            (r'[{}()\[\]]', Punctuation),
            (r'(eq|ne)\b', Operator.Word),
            (r'([a-zA-Z_]\w*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_]\w*', Name),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],
        'optionarg': [
            include('whitespace'),
            (_shortened_many(
                "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
                "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
                "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
                "fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
                "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
                "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
                "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
                "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
                "mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
                "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
                "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
                "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
                "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
                "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
                "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
                "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
                "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
                "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
                "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
                "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
                "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
                "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
                "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
                "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
                "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
                "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
                "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
                "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
        ],
        'bind': [
            ('!', Keyword, '#pop'),
            (_shortened('all$windows'), Name.Builtin),
            include('genericargs'),
        ],
        'quit': [
            (r'gnuplot\b', Keyword),
            include('noargs'),
        ],
        'fit': [
            (r'via\b', Name.Builtin),
            include('plot'),
        ],
        'if': [
            (r'\)', Punctuation, '#pop'),
            include('genericargs'),
        ],
        'pause': [
            (r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
            (_shortened('key$press'), Name.Builtin),
            include('genericargs'),
        ],
        'plot': [
            (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
                             'mat$rix', 's$mooth', 'thru$', 't$itle',
                             'not$itle', 'u$sing', 'w$ith'),
             Name.Builtin),
            include('genericargs'),
        ],
        'save': [
            (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
             Name.Builtin),
            include('genericargs'),
        ],
    }
+
+
class PovrayLexer(RegexLexer):
    """
    For Persistence of Vision Raytracer files.

    .. versionadded:: 0.11
    """
    name = 'POVRay'
    url = 'http://www.povray.org/'
    aliases = ['pov']
    filenames = ['*.pov', '*.inc']
    mimetypes = ['text/x-povray']

    tokens = {
        'root': [
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*$', Comment.Single),
            (r'(?s)"(?:\\.|[^"\\])+"', String.Double),
            (words((
                'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
                'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
                'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
                'statistics', 'switch', 'undef', 'version', 'warning', 'while',
                'write'), prefix=r'#', suffix=r'\b'),
             Comment.Preproc),
            (words((
                'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
                'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
                'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
                'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
                'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
                'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
                'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
                'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
                'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
                'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
                'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
                'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
                'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
                'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
                'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
                'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
                'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
                'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
                'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
                'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
                'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
                'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
                'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
                'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
                'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
                'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
                'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
                'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
                'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
                'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
                'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
                'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
                'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
                'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
                'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
                'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
                'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
                'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
                'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
                't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
                'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
                'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
                'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
                'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
                'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
                'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
                'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
                'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
                'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
                'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
                'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
                'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
             Name.Builtin),
            (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
            (r'[0-9]*\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'[\[\](){}<>;,]', Punctuation),
            (r'[-+*/=.|&]|<=|>=|!=', Operator),
            # NOTE: the String.Double rule near the top requires one or
            # more characters, so this rule only catches empty strings "".
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r'\s+', Whitespace),
        ]
    }

    def analyse_text(text):
        """POVRAY is similar to JSON/C, but the combination of camera and
        light_source is probably not very likely elsewhere. HLSL or GLSL
        are similar (GLSL even has #version), but they miss #declare, and
        light_source/camera are not keywords anywhere else -- it's fair
        to assume though that any POVRAY scene must have a camera and
        lightsource."""
        result = 0
        if '#version' in text:
            result += 0.05
        if '#declare' in text:
            result += 0.05
        if 'camera' in text:
            result += 0.05
        if 'light_source' in text:
            result += 0.1

        return result
diff --git a/pygments/lexers/graphviz.py b/pygments/lexers/graphviz.py
new file mode 100644
index 0000000..c68ce2d
--- /dev/null
+++ b/pygments/lexers/graphviz.py
@@ -0,0 +1,59 @@
+"""
+ pygments.lexers.graphviz
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the DOT language (graphviz).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Keyword, Operator, Name, String, Number, \
+ Punctuation, Whitespace
+
+
+__all__ = ['GraphvizLexer']
+
+
class GraphvizLexer(RegexLexer):
    """
    For graphviz DOT graph description language.

    .. versionadded:: 2.8
    """
    name = 'Graphviz'
    url = 'https://www.graphviz.org/doc/info/lang.html'
    aliases = ['graphviz', 'dot']
    filenames = ['*.gv', '*.dot']
    mimetypes = ['text/x-graphviz', 'text/vnd.graphviz']
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(#|//).*?$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(?i)(node|edge|graph|digraph|subgraph|strict)\b', Keyword),
            (r'--|->', Operator),
            (r'[{}[\]:;,]', Punctuation),
            # "name = value": tag the left-hand side as an attribute name,
            # then switch to 'attr_id' to consume the right-hand side.
            (r'(\b\D\w*)(\s*)(=)(\s*)',
             bygroups(Name.Attribute, Whitespace, Punctuation, Whitespace),
             'attr_id'),
            # compass points used as port positions (n, ne, ..., c, _)
            (r'\b(n|ne|e|se|s|sw|w|nw|c|_)\b', Name.Builtin),
            (r'\b\D\w*', Name.Tag),  # node
            (r'[-]?((\.[0-9]+)|([0-9]+(\.[0-9]*)?))', Number),
            (r'"(\\"|[^"])*?"', Name.Tag),  # quoted node
            (r'<', Punctuation, 'xml'),
        ],
        # value side of an attribute assignment; every alternative pops
        # straight back to 'root'
        'attr_id': [
            (r'\b\D\w*', String, '#pop'),
            (r'[-]?((\.[0-9]+)|([0-9]+(\.[0-9]*)?))', Number, '#pop'),
            (r'"(\\"|[^"])*?"', String.Double, '#pop'),
            (r'<', Punctuation, ('#pop', 'xml')),
        ],
        # minimal handling of HTML-like labels: tracks nested angle
        # brackets via #push/#pop rather than parsing real XML
        'xml': [
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\s+', Whitespace),
            (r'[^<>\s]', Name.Tag),
        ]
    }
diff --git a/pygments/lexers/gsql.py b/pygments/lexers/gsql.py
new file mode 100755
index 0000000..8c7674d
--- /dev/null
+++ b/pygments/lexers/gsql.py
@@ -0,0 +1,104 @@
+"""
+ pygments.lexers.gsql
+ ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the TigerGraph GSQL graph query language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, words
+from pygments.token import Keyword, Punctuation, Comment, Operator, Name, \
+ String, Number, Whitespace
+
+__all__ = ["GSQLLexer"]
+
+
class GSQLLexer(RegexLexer):

    """
    For GSQL queries (version 3.x).

    .. versionadded:: 2.10
    """

    name = 'GSQL'
    url = 'https://docs.tigergraph.com/dev/gsql-ref'
    aliases = ['gsql']
    filenames = ['*.gsql']

    # IGNORECASE means every word list below matches in any case, even
    # where entries are written upper- or mixed-case.
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        # 'root' is built purely from includes, so overall precedence is
        # exactly the order of the include() calls below.
        'root': [
            include('comment'),
            include('keywords'),
            include('clauses'),
            include('accums'),
            include('relations'),
            include('strings'),
            include('whitespace'),
            include('barewords'),
            include('operators'),
        ],
        'comment': [
            (r'\#.*', Comment.Single),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
        ],
        'keywords': [
            # prefix (?<!\.) keeps qualified names like "v.type" from
            # being treated as keywords
            (words((
                'ACCUM', 'AND', 'ANY', 'API', 'AS', 'ASC', 'AVG', 'BAG', 'BATCH',
                'BETWEEN', 'BOOL', 'BOTH', 'BREAK', 'BY', 'CASE', 'CATCH', 'COALESCE',
                'COMPRESS', 'CONTINUE', 'COUNT', 'CREATE', 'DATETIME', 'DATETIME_ADD',
                'DATETIME_SUB', 'DELETE', 'DESC', 'DISTRIBUTED', 'DO', 'DOUBLE',
                'EDGE', 'ELSE', 'END', 'ESCAPE', 'EXCEPTION', 'FALSE', 'FILE',
                'FILTER', 'FLOAT', 'FOREACH', 'FOR', 'FROM', 'GRAPH', 'GROUP',
                'GSQL_INT_MAX', 'GSQL_INT_MIN', 'GSQL_UINT_MAX', 'HAVING', 'IF',
                'IN', 'INSERT', 'INT', 'INTERPRET', 'INTERSECT', 'INTERVAL', 'INTO',
                'IS', 'ISEMPTY', 'JSONARRAY', 'JSONOBJECT', 'LASTHOP', 'LEADING',
                'LIKE', 'LIMIT', 'LIST', 'LOAD_ACCUM', 'LOG', 'MAP', 'MATCH', 'MAX',
                'MIN', 'MINUS', 'NOT', 'NOW', 'NULL', 'OFFSET', 'OR', 'ORDER', 'PATH',
                'PER', 'PINNED', 'POST_ACCUM', 'POST-ACCUM', 'PRIMARY_ID', 'PRINT',
                'QUERY', 'RAISE', 'RANGE', 'REPLACE', 'RESET_COLLECTION_ACCUM',
                'RETURN', 'RETURNS', 'RUN', 'SAMPLE', 'SELECT', 'SELECT_VERTEX',
                'SET', 'SRC', 'STATIC', 'STRING', 'SUM', 'SYNTAX', 'TARGET',
                'TAGSTGT', 'THEN', 'TO', 'TO_CSV', 'TO_DATETIME', 'TRAILING',
                'TRIM', 'TRUE', 'TRY', 'TUPLE', 'TYPEDEF', 'UINT', 'UNION', 'UPDATE',
                'VALUES', 'VERTEX', 'WHEN', 'WHERE', 'WHILE', 'WITH'),
                prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
        ],
        'clauses': [
            (words(('accum', 'having', 'limit', 'order', 'postAccum', 'sample', 'where')),
             Name.Builtin),
        ],
        # built-in accumulator type names (case-insensitive via flags)
        'accums': [
            (words(('andaccum', 'arrayaccum', 'avgaccum', 'bagaccum', 'bitwiseandaccum',
                    'bitwiseoraccum', 'groupbyaccum', 'heapaccum', 'listaccum',
                    'MapAccum', 'maxaccum', 'minaccum', 'oraccum', 'setaccum',
                    'sumaccum')), Name.Builtin),
        ],
        'relations': [
            # edge pattern like "- (E:e) -": re-lex the parenthesised part
            # with this same lexer via using(this)
            (r'(-\s?)(\(.*\:\w?\))(\s?-)', bygroups(Operator, using(this), Operator)),
            (r'->|<-', Operator),
            (r'[.*{}\[\]\<\>\_]', Punctuation),
        ],
        'strings': [
            (r'"([^"\\]|\\.)*"', String),
            # @local and @@global accumulator references
            (r'@{1,2}\w+', Name.Variable),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'barewords': [
            (r'[a-z]\w*', Name),
            (r'(\d+\.\d+|\d+)', Number),
        ],
        'operators': [
            (r'\$|[^0-9|\/|\-](\-\=|\+\=|\*\=|\\\=|\=|\=\=|\=\=\=|'
             r'\+|\-|\*|\\|\+\=|\>|\<)[^\>|\/]', Operator),
            (r'(\||\(|\)|\,|\;|\=|\-|\+|\*|\/|\>|\<|\:)', Operator),
        ],
    }
diff --git a/pygments/lexers/haskell.py b/pygments/lexers/haskell.py
new file mode 100644
index 0000000..6b11ca5
--- /dev/null
+++ b/pygments/lexers/haskell.py
@@ -0,0 +1,871 @@
+"""
+ pygments.lexers.haskell
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Haskell and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
+ default, include, inherit, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+from pygments import unistring as uni
+
+__all__ = ['HaskellLexer', 'HspecLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',
+ 'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer',
+ 'LiterateCryptolLexer', 'KokaLexer']
+
+
class HaskellLexer(RegexLexer):
    """
    A Haskell lexer based on the lexemes defined in the Haskell 98 Report.

    .. versionadded:: 0.8
    """
    name = 'Haskell'
    url = 'https://www.haskell.org/'
    aliases = ['haskell', 'hs']
    filenames = ['*.hs']
    mimetypes = ['text/x-haskell']

    # NOTE: entries are regex fragments, not plain words - e.g.
    # 'infix[lr]?' covers infix, infixl and infixr when joined with '|'.
    reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else',
                'family', 'if', 'in', 'infix[lr]?', 'instance',
                'let', 'newtype', 'of', 'then', 'type', 'where', '_')
    # ASCII control-character escape names; also regex fragments
    # ('[SE]TX' covers STX and ETX, etc.), used in the 'escape' state.
    ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
             'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
             'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
             'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')

    tokens = {
        'root': [
            # Whitespace:
            (r'\s+', Whitespace),
            # (r'--\s*|.*$', Comment.Doc),
            (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
            (r'\{-', Comment.Multiline, 'comment'),
            # Lexemes:
            #  Identifiers
            (r'\bimport\b', Keyword.Reserved, 'import'),
            (r'\bmodule\b', Keyword.Reserved, 'module'),
            (r'\berror\b', Name.Exception),
            (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
            (r"'[^\\]'", String.Char),  # this has to come before the TH quote
            (r'^[_' + uni.Ll + r'][\w\']*', Name.Function),
            (r"'?[_" + uni.Ll + r"][\w']*", Name),
            (r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type),
            (r"(')[" + uni.Lu + r"][\w\']*", Keyword.Type),
            (r"(')\[[^\]]*\]", Keyword.Type),  # tuples and lists get special treatment in GHC
            (r"(')\([^)]*\)", Keyword.Type),  # ..
            (r"(')[:!#$%&*+.\\/<=>?@^|~-]+", Keyword.Type),  # promoted type operators
            #  Operators
            (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function),  # lambda operator
            (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word),  # specials
            (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type),  # Constructor operators
            (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),  # Other operators
            #  Numbers (GHC NumericUnderscores: '_' allowed between digits)
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*_*[pP][+-]?\d(_*\d)*', Number.Float),
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*\.[\da-fA-F](_*[\da-fA-F])*'
             r'(_*[pP][+-]?\d(_*\d)*)?', Number.Float),
            (r'\d(_*\d)*_*[eE][+-]?\d(_*\d)*', Number.Float),
            (r'\d(_*\d)*\.\d(_*\d)*(_*[eE][+-]?\d(_*\d)*)?', Number.Float),
            (r'0[bB]_*[01](_*[01])*', Number.Bin),
            (r'0[oO]_*[0-7](_*[0-7])*', Number.Oct),
            (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*', Number.Hex),
            (r'\d(_*\d)*', Number.Integer),
            #  Character/String Literals
            (r"'", String.Char, 'character'),
            (r'"', String, 'string'),
            #  Special
            (r'\[\]', Keyword.Type),
            (r'\(\)', Name.Builtin),
            (r'[][(),;`{}]', Punctuation),
        ],
        'import': [
            # Import statements
            (r'\s+', Whitespace),
            (r'"', String, 'string'),
            # after "funclist" state
            (r'\)', Punctuation, '#pop'),
            (r'qualified\b', Keyword),
            # import X as Y
            (r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)',
             bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'),
            # import X hiding (functions)
            (r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'),
            # import X (functions)
            (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
            # import X
            (r'[\w.]+', Name.Namespace, '#pop'),
        ],
        'module': [
            (r'\s+', Whitespace),
            (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
            (r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'),
        ],
        'funclist': [
            (r'\s+', Whitespace),
            (r'[' + uni.Lu + r']\w*', Keyword.Type),
            (r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function),
            (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
            (r'\{-', Comment.Multiline, 'comment'),
            (r',', Punctuation),
            (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
            # (HACK, but it makes sense to push two instances, believe me)
            (r'\(', Punctuation, ('funclist', 'funclist')),
            (r'\)', Punctuation, '#pop:2'),
        ],
        # NOTE: the next four states are shared in the AgdaLexer; make sure
        # any change is compatible with Agda as well or copy over and change
        'comment': [
            # Multiline Comments
            (r'[^-{}]+', Comment.Multiline),
            (r'\{-', Comment.Multiline, '#push'),
            (r'-\}', Comment.Multiline, '#pop'),
            (r'[-{}]', Comment.Multiline),
        ],
        'character': [
            # Allows multi-chars, incorrectly.
            (r"[^\\']'", String.Char, '#pop'),
            (r"\\", String.Escape, 'escape'),
            ("'", String.Char, '#pop'),
        ],
        'string': [
            (r'[^\\"]+', String),
            (r"\\", String.Escape, 'escape'),
            ('"', String, '#pop'),
        ],
        'escape': [
            (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
            (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),
            ('|'.join(ascii), String.Escape, '#pop'),
            (r'o[0-7]+', String.Escape, '#pop'),
            (r'x[\da-fA-F]+', String.Escape, '#pop'),
            (r'\d+', String.Escape, '#pop'),
            # string gap: backslash, whitespace, backslash
            (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
        ],
    }
+
+
class HspecLexer(HaskellLexer):
    """
    A Haskell lexer with support for Hspec constructs.

    .. versionadded:: 2.4.0
    """

    name = 'Hspec'
    aliases = ['hspec']
    filenames = ['*Spec.hs']
    mimetypes = []

    tokens = {
        'root': [
            # Hspec DSL entry points: mark the spec-description string as
            # documentation.  These come before the inherited Haskell
            # rules so they win over the generic identifier/string rules.
            (r'(it)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
            (r'(describe)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
            (r'(context)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
            inherit,
        ],
    }
+
+
class IdrisLexer(RegexLexer):
    """
    A lexer for the dependently typed programming language Idris.

    Based on the Haskell and Agda Lexer.

    .. versionadded:: 2.0
    """
    name = 'Idris'
    url = 'https://www.idris-lang.org/'
    aliases = ['idris', 'idr']
    filenames = ['*.idr']
    mimetypes = ['text/x-idris']

    # entries are regex fragments ('infix[lr]?'), joined with '|' below
    reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else',
                'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto',
                'namespace', 'codata', 'mutual', 'private', 'public', 'abstract',
                'total', 'partial',
                'interface', 'implementation', 'export', 'covering', 'constructor',
                'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with',
                'pattern', 'term', 'syntax', 'prefix',
                'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit',
                'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial')

    # ASCII control-character escape names (regex fragments), used in
    # the 'escape' state
    ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
             'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
             'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
             'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')

    # Idris compiler %directives (e.g. %lib, %link) recognised at line start
    directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access',
                  'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language')

    tokens = {
        'root': [
            # Comments
            (r'^(\s*)(%%(%s))' % '|'.join(directives),
             bygroups(Whitespace, Keyword.Reserved)),
            (r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Whitespace, Comment.Single)),
            (r'(\s*)(\|{3}.*?)$', bygroups(Whitespace, Comment.Single)),
            (r'(\s*)(\{-)', bygroups(Whitespace, Comment.Multiline), 'comment'),
            # Declaration
            (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
             bygroups(Whitespace, Name.Function, Whitespace, Operator.Word, Whitespace)),
            #  Identifiers
            (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
            (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace), 'module'),
            (r"('')?[A-Z][\w\']*", Keyword.Type),
            (r'[a-z][\w\']*', Text),
            #  Special Symbols
            (r'(<-|::|->|=>|=)', Operator.Word),  # specials
            (r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word),  # specials
            #  Numbers
            (r'\d+[eE][+-]?\d+', Number.Float),
            (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            # Strings
            (r"'", String.Char, 'character'),
            (r'"', String, 'string'),
            (r'[^\s(){}]+', Text),
            (r'\s+?', Whitespace),  # Whitespace
        ],
        'module': [
            (r'\s+', Whitespace),
            (r'([A-Z][\w.]*)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
            (r'[A-Z][\w.]*', Name.Namespace, '#pop'),
        ],
        'funclist': [
            (r'\s+', Whitespace),
            (r'[A-Z]\w*', Keyword.Type),
            (r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
            (r'--.*$', Comment.Single),
            (r'\{-', Comment.Multiline, 'comment'),
            (r',', Punctuation),
            (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
            # (HACK, but it makes sense to push two instances, believe me)
            (r'\(', Punctuation, ('funclist', 'funclist')),
            (r'\)', Punctuation, '#pop:2'),
        ],
        # NOTE: the next four states are shared in the AgdaLexer; make sure
        # any change is compatible with Agda as well or copy over and change
        'comment': [
            # Multiline Comments
            (r'[^-{}]+', Comment.Multiline),
            (r'\{-', Comment.Multiline, '#push'),
            (r'-\}', Comment.Multiline, '#pop'),
            (r'[-{}]', Comment.Multiline),
        ],
        'character': [
            # Allows multi-chars, incorrectly.
            # Note: unlike HaskellLexer, the first rule does not pop;
            # only the closing quote does.
            (r"[^\\']", String.Char),
            (r"\\", String.Escape, 'escape'),
            ("'", String.Char, '#pop'),
        ],
        'string': [
            (r'[^\\"]+', String),
            (r"\\", String.Escape, 'escape'),
            ('"', String, '#pop'),
        ],
        'escape': [
            (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
            (r'\^[][A-Z@^_]', String.Escape, '#pop'),
            ('|'.join(ascii), String.Escape, '#pop'),
            (r'o[0-7]+', String.Escape, '#pop'),
            (r'x[\da-fA-F]+', String.Escape, '#pop'),
            (r'\d+', String.Escape, '#pop'),
            (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop')
        ],
    }
+
+
class AgdaLexer(RegexLexer):
    """
    For the Agda dependently typed functional programming language and
    proof assistant.

    .. versionadded:: 2.0
    """

    name = 'Agda'
    url = 'http://wiki.portal.chalmers.se/agda/pmwiki.php'
    aliases = ['agda']
    filenames = ['*.agda']
    mimetypes = ['text/x-agda']

    reserved = (
        'abstract', 'codata', 'coinductive', 'constructor', 'data', 'do',
        'eta-equality', 'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
        'infixl', 'infixr', 'instance', 'interleaved', 'let', 'macro', 'mutual',
        'no-eta-equality', 'open', 'overlap', 'pattern', 'postulate', 'primitive',
        'private', 'quote', 'quoteTerm', 'record', 'renaming', 'rewrite',
        'syntax', 'tactic', 'unquote', 'unquoteDecl', 'unquoteDef', 'using',
        'variable', 'where', 'with',
    )

    tokens = {
        'root': [
            # Declaration
            (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
             bygroups(Whitespace, Name.Function, Whitespace,
                      Operator.Word, Whitespace)),
            # Comments
            (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
            (r'\{-', Comment.Multiline, 'comment'),
            # Holes ({! ... !} interactive goals)
            (r'\{!', Comment.Directive, 'hole'),
            # Lexemes:
            #  Identifiers
            (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
            (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace),
             'module'),
            # universe levels: Set/Prop with optional subscript digits
            (r'\b(Set|Prop)[\u2080-\u2089]*\b', Keyword.Type),
            #  Special Symbols
            (r'(\(|\)|\{|\})', Operator),
            # includes the Unicode forms lambda, forall and arrow
            (r'(\.{1,3}|\||\u03BB|\u2200|\u2192|:|=|->)', Operator.Word),
            #  Numbers
            (r'\d+[eE][+-]?\d+', Number.Float),
            (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            # Strings
            (r"'", String.Char, 'character'),
            (r'"', String, 'string'),
            (r'[^\s(){}]+', Text),
            (r'\s+?', Whitespace),  # Whitespace
        ],
        'hole': [
            # Holes nest, tracked with #push/#pop
            (r'[^!{}]+', Comment.Directive),
            (r'\{!', Comment.Directive, '#push'),
            (r'!\}', Comment.Directive, '#pop'),
            (r'[!{}]', Comment.Directive),
        ],
        'module': [
            (r'\{-', Comment.Multiline, 'comment'),
            (r'[a-zA-Z][\w.\']*', Name, '#pop'),
            (r'[\W0-9_]+', Text)
        ],
        # These four states are reused from HaskellLexer; see the NOTE in
        # that class before changing them there.
        'comment': HaskellLexer.tokens['comment'],
        'character': HaskellLexer.tokens['character'],
        'string': HaskellLexer.tokens['string'],
        'escape': HaskellLexer.tokens['escape']
    }
+
+
class CryptolLexer(RegexLexer):
    """
    FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report.

    .. versionadded:: 2.0
    """
    name = 'Cryptol'
    aliases = ['cryptol', 'cry']
    filenames = ['*.cry']
    mimetypes = ['text/x-cryptol']

    reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else',
                'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2',
                'max', 'min', 'module', 'newtype', 'pragma', 'property',
                'then', 'type', 'where', 'width')
    # ASCII control-character escape names (regex fragments), used in
    # the 'escape' state
    ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
             'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
             'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
             'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')

    tokens = {
        'root': [
            # Whitespace:
            (r'\s+', Whitespace),
            # (r'--\s*|.*$', Comment.Doc),
            (r'//.*$', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),
            # Lexemes:
            #  Identifiers
            (r'\bimport\b', Keyword.Reserved, 'import'),
            (r'\bmodule\b', Keyword.Reserved, 'module'),
            (r'\berror\b', Name.Exception),
            (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
            (r'^[_a-z][\w\']*', Name.Function),
            (r"'?[_a-z][\w']*", Name),
            (r"('')?[A-Z][\w\']*", Keyword.Type),
            #  Operators
            (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function),  # lambda operator
            (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word),  # specials
            (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type),  # Constructor operators
            (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),  # Other operators
            #  Numbers
            (r'\d+[eE][+-]?\d+', Number.Float),
            (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
            (r'0[oO][0-7]+', Number.Oct),
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            #  Character/String Literals
            (r"'", String.Char, 'character'),
            (r'"', String, 'string'),
            #  Special
            (r'\[\]', Keyword.Type),
            (r'\(\)', Name.Builtin),
            (r'[][(),;`{}]', Punctuation),
        ],
        'import': [
            # Import statements
            (r'\s+', Whitespace),
            (r'"', String, 'string'),
            # after "funclist" state
            (r'\)', Punctuation, '#pop'),
            (r'qualified\b', Keyword),
            # import X as Y
            (r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)',
             bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'),
            # import X hiding (functions)
            (r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'),
            # import X (functions)
            (r'([A-Z][\w.]*)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
            # import X
            (r'[\w.]+', Name.Namespace, '#pop'),
        ],
        'module': [
            (r'\s+', Whitespace),
            (r'([A-Z][\w.]*)(\s+)(\()',
             bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
            (r'[A-Z][\w.]*', Name.Namespace, '#pop'),
        ],
        'funclist': [
            (r'\s+', Whitespace),
            (r'[A-Z]\w*', Keyword.Type),
            (r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
            # TODO: these don't match the comments in docs, remove.
            # (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
            # (r'{-', Comment.Multiline, 'comment'),
            (r',', Punctuation),
            (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
            # (HACK, but it makes sense to push two instances, believe me)
            (r'\(', Punctuation, ('funclist', 'funclist')),
            (r'\)', Punctuation, '#pop:2'),
        ],
        'comment': [
            # Multiline Comments (C-style, nesting via #push)
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'character': [
            # Allows multi-chars, incorrectly.
            (r"[^\\']'", String.Char, '#pop'),
            (r"\\", String.Escape, 'escape'),
            ("'", String.Char, '#pop'),
        ],
        'string': [
            (r'[^\\"]+', String),
            (r"\\", String.Escape, 'escape'),
            ('"', String, '#pop'),
        ],
        'escape': [
            (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
            (r'\^[][A-Z@^_]', String.Escape, '#pop'),
            ('|'.join(ascii), String.Escape, '#pop'),
            (r'o[0-7]+', String.Escape, '#pop'),
            (r'x[\da-fA-F]+', String.Escape, '#pop'),
            (r'\d+', String.Escape, '#pop'),
            (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
        ],
    }

    # Builtin function names; plain Name tokens matching these are
    # promoted to Name.Builtin in get_tokens_unprocessed below.
    EXTRA_KEYWORDS = {'join', 'split', 'reverse', 'transpose', 'width',
                      'length', 'tail', '<<', '>>', '<<<', '>>>', 'const',
                      'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error',
                      'trace'}

    def get_tokens_unprocessed(self, text):
        # Post-process the regex lexer's stream: re-tag known builtin
        # names so they are highlighted as builtins rather than plain
        # identifiers.
        stack = ['root']
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Name.Builtin, value
            else:
                yield index, token, value
+
+
class LiterateLexer(Lexer):
    """
    Base class for lexers of literate file formats based on LaTeX or Bird-style
    (prefixing each code line with ">").

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``. If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.
    """

    # matches a Bird-style code line: the ">" prefix and the code after it
    bird_re = re.compile(r'(>[ \t]*)(.*\n)')

    def __init__(self, baselexer, **options):
        # baselexer: the lexer used for the extracted code portions
        self.baselexer = baselexer
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Split *text* into code and non-code, lex the code with
        ``self.baselexer``, and re-insert the non-code tokens at their
        original offsets via ``do_insertions``."""
        style = self.options.get('litstyle')
        if style is None:
            style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'

        code = ''
        insertions = []
        if style == 'bird':
            # bird-style: ">"-prefixed lines are code, everything else is
            # literate text; insertion offsets are positions in `code`
            for match in line_re.finditer(text):
                line = match.group()
                m = self.bird_re.match(line)
                if m:
                    insertions.append((len(code),
                                       [(0, Comment.Special, m.group(1))]))
                    code += m.group(2)
                else:
                    insertions.append((len(code), [(0, Text, line)]))
        else:
            # latex-style: code lives between \begin{code} and \end{code};
            # the surrounding prose is lexed with TexLexer
            from pygments.lexers.markup import TexLexer
            lxlexer = TexLexer(**self.options)
            codelines = 0
            latex = ''
            for match in line_re.finditer(text):
                line = match.group()
                if codelines:
                    if line.lstrip().startswith('\\end{code}'):
                        codelines = 0
                        latex += line
                    else:
                        code += line
                elif line.lstrip().startswith('\\begin{code}'):
                    codelines = 1
                    latex += line
                    insertions.append((len(code),
                                       list(lxlexer.get_tokens_unprocessed(latex))))
                    latex = ''
                else:
                    latex += line
            # flush any trailing LaTeX after the last code block
            insertions.append((len(code),
                               list(lxlexer.get_tokens_unprocessed(latex))))
        yield from do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code))
+
+
class LiterateHaskellLexer(LiterateLexer):
    """
    For Literate Haskell (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``. If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 0.9
    """
    name = 'Literate Haskell'
    aliases = ['literate-haskell', 'lhaskell', 'lhs']
    filenames = ['*.lhs']
    mimetypes = ['text/x-literate-haskell']

    def __init__(self, **options):
        # Delegate the code portions to a plain Haskell lexer.
        super().__init__(HaskellLexer(**options), **options)
+
+
class LiterateIdrisLexer(LiterateLexer):
    """
    For Literate Idris (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``. If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Idris'
    aliases = ['literate-idris', 'lidris', 'lidr']
    filenames = ['*.lidr']
    mimetypes = ['text/x-literate-idris']

    def __init__(self, **options):
        # Delegate the code portions to a plain Idris lexer.
        super().__init__(IdrisLexer(**options), **options)
+
+
class LiterateAgdaLexer(LiterateLexer):
    """
    For Literate Agda source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``. If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Agda'
    aliases = ['literate-agda', 'lagda']
    filenames = ['*.lagda']
    mimetypes = ['text/x-literate-agda']

    def __init__(self, **options):
        # Literate Agda is always LaTeX-style, so force litstyle here.
        super().__init__(AgdaLexer(**options), litstyle='latex', **options)
+
+
class LiterateCryptolLexer(LiterateLexer):
    """
    For Literate Cryptol (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``. If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Cryptol'
    aliases = ['literate-cryptol', 'lcryptol', 'lcry']
    filenames = ['*.lcry']
    mimetypes = ['text/x-literate-cryptol']

    def __init__(self, **options):
        # Delegate the code portions to a plain Cryptol lexer.
        super().__init__(CryptolLexer(**options), **options)
+
+
class KokaLexer(RegexLexer):
    """
    Lexer for the Koka language.

    .. versionadded:: 1.6
    """

    name = 'Koka'
    url = 'https://koka-lang.github.io/koka/doc/index.html'
    aliases = ['koka']
    filenames = ['*.kk', '*.kki']
    mimetypes = ['text/x-koka']

    # NOTE(review): 'private' appears twice in this list; harmless in the
    # '|'-joined regex, but could be deduplicated upstream.
    keywords = [
        'infix', 'infixr', 'infixl',
        'type', 'cotype', 'rectype', 'alias',
        'struct', 'con',
        'fun', 'function', 'val', 'var',
        'external',
        'if', 'then', 'else', 'elif', 'return', 'match',
        'private', 'public', 'private',
        'module', 'import', 'as',
        'include', 'inline',
        'rec',
        'try', 'yield', 'enum',
        'interface', 'instance',
    ]

    # keywords that are followed by a type
    typeStartKeywords = [
        'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
    ]

    # keywords valid in a type
    typekeywords = [
        'forall', 'exists', 'some', 'with',
    ]

    # builtin names and special names
    builtin = [
        'for', 'while', 'repeat',
        'foreach', 'foreach-indexed',
        'error', 'catch', 'finally',
        'cs', 'js', 'file', 'ref', 'assigned',
    ]

    # symbols that can be in an operator
    symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+'

    # symbol boundary: an operator keyword should not be followed by any of these
    sboundary = '(?!' + symbols + ')'

    # name boundary: a keyword should not be followed by any of these
    boundary = r'(?![\w/])'

    # koka token abstractions (aliases so the intent reads in the rules)
    tokenType = Name.Attribute
    tokenTypeDef = Name.Class
    tokenConstructor = Generic.Emph

    # main lexer
    tokens = {
        'root': [
            include('whitespace'),

            # go into type mode
            (r'::?' + sboundary, tokenType, 'type'),
            (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
             'alias-type'),
            (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
             'struct-type'),
            ((r'(%s)' % '|'.join(typeStartKeywords)) +
             r'(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
             'type'),

            # special sequences of tokens (we use ?: for non-capturing group as
            # required by 'bygroups')
            (r'(module)(\s+)(interface(?=\s))?(\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)),
            (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
             r'(?:(\s*)(=)(\s*)(qualified)?(\s*)'
             r'((?:[a-z]\w*/)*[a-z]\w*))?',
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, Keyword, Whitespace,
                      Keyword, Whitespace, Name.Namespace)),

            # function/value definitions at line start
            (r'^(public|private)?(\s+)?(function|fun|val)'
             r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
            (r'^(?:(public|private)(?=\s+external))?((?<!^)\s+)?(external)(\s+)(inline(?=\s))?(\s+)?'
             r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Function)),

            # keywords
            (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
            (r'(%s)' % '|'.join(keywords) + boundary, Keyword),
            (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
            (r'::?|:=|\->|[=.]' + sboundary, Keyword),

            # names, possibly qualified with "pkg/mod/" prefixes
            (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
             bygroups(Name.Namespace, tokenConstructor)),
            (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
            (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
             bygroups(Name.Namespace, Name)),
            (r'_\w*', Name.Variable),

            # literal string
            (r'@"', String.Double, 'litstring'),

            # operators
            (symbols + "|/(?![*/])", Operator),
            (r'`', Operator),
            (r'[{}()\[\];,]', Punctuation),

            # literals. No check for literal characters with len > 1
            (r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            (r"'", String.Char, 'char'),
            (r'"', String.Double, 'string'),
        ],

        # type started by alias
        'alias-type': [
            (r'=', Keyword),
            include('type')
        ],

        # type started by struct
        'struct-type': [
            (r'(?=\((?!,*\)))', Punctuation, '#pop'),
            include('type')
        ],

        # type started by colon
        'type': [
            (r'[(\[<]', tokenType, 'type-nested'),
            include('type-content')
        ],

        # type nested in brackets: can contain parameters, comma etc.
        'type-nested': [
            (r'[)\]>]', tokenType, '#pop'),
            (r'[(\[<]', tokenType, 'type-nested'),
            (r',', tokenType),
            (r'([a-z]\w*)(\s*)(:)(?!:)',
             bygroups(Name, Whitespace, tokenType)),  # parameter name
            include('type-content')
        ],

        # shared contents of a type
        'type-content': [
            include('whitespace'),

            # keywords
            (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
            (r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
             Keyword, '#pop'),  # need to match because names overlap...

            # kinds
            (r'[EPHVX]' + boundary, tokenType),

            # type names
            (r'[a-z][0-9]*(?![\w/])', tokenType),
            (r'_\w*', tokenType.Variable),  # Generic.Emph
            (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
             bygroups(Name.Namespace, tokenType)),
            (r'((?:[a-z]\w*/)*)([a-z]\w+)',
             bygroups(Name.Namespace, tokenType)),

            # type keyword operators
            (r'::|->|[.:|]', tokenType),

            # catchall: anything else ends the type
            default('#pop')
        ],

        # comments and literals
        'whitespace': [
            (r'(\n\s*)(#.*)$', bygroups(Whitespace, Comment.Preproc)),
            (r'\s+', Whitespace),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'//.*$', Comment.Single)
        ],
        'comment': [
            # block comments nest via #push/#pop
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'litstring': [
            # @"..." raw string; "" is the escaped quote
            (r'[^"]+', String.Double),
            (r'""', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'string': [
            (r'[^\\"\n]+', String.Double),
            include('escape-sequence'),
            (r'["\n]', String.Double, '#pop'),
        ],
        'char': [
            (r'[^\\\'\n]+', String.Char),
            include('escape-sequence'),
            (r'[\'\n]', String.Char, '#pop'),
        ],
        'escape-sequence': [
            (r'\\[nrt\\"\']', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            # Yes, \U literals are 6 hex digits.
            (r'\\U[0-9a-fA-F]{6}', String.Escape)
        ]
    }
diff --git a/pygments/lexers/haxe.py b/pygments/lexers/haxe.py
new file mode 100644
index 0000000..1d9c6f5
--- /dev/null
+++ b/pygments/lexers/haxe.py
@@ -0,0 +1,937 @@
+"""
+ pygments.lexers.haxe
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Haxe and related stuff.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
+ default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+__all__ = ['HaxeLexer', 'HxmlLexer']
+
+
+class HaxeLexer(ExtendedRegexLexer):
+ """
+ For Haxe source code.
+
+ .. versionadded:: 1.3
+ """
+
+ name = 'Haxe'
+ url = 'http://haxe.org/'
+ aliases = ['haxe', 'hxsl', 'hx']
+ filenames = ['*.hx', '*.hxsl']
+ mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx']
+
+ # keywords extracted from lexer.mll in the haxe compiler source
+ keyword = (r'(?:function|class|static|var|if|else|while|do|for|'
+ r'break|return|continue|extends|implements|import|'
+ r'switch|case|default|public|private|try|untyped|'
+ r'catch|new|this|throw|extern|enum|in|interface|'
+ r'cast|override|dynamic|typedef|package|'
+ r'inline|using|null|true|false|abstract)\b')
+
+ # idtype in lexer.mll
+ typeid = r'_*[A-Z]\w*'
+
+ # combined ident and dollar and idtype
+ ident = r'(?:_*[a-z]\w*|_+[0-9]\w*|' + typeid + r'|_+|\$\w+)'
+
+ binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|'
+ r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|'
+ r'/|\-|=>|=)')
+
+ # ident except keywords
+ ident_no_keyword = r'(?!' + keyword + ')' + ident
+
+ flags = re.DOTALL | re.MULTILINE
+
+ preproc_stack = []
+
+ def preproc_callback(self, match, ctx):
+ proc = match.group(2)
+
+ if proc == 'if':
+ # store the current stack
+ self.preproc_stack.append(ctx.stack[:])
+ elif proc in ['else', 'elseif']:
+ # restore the stack back to right before #if
+ if self.preproc_stack:
+ ctx.stack = self.preproc_stack[-1][:]
+ elif proc == 'end':
+ # remove the saved stack of previous #if
+ if self.preproc_stack:
+ self.preproc_stack.pop()
+
+ # #if and #elseif should be followed by an expr
+ if proc in ['if', 'elseif']:
+ ctx.stack.append('preproc-expr')
+
+ # #error can optionally be followed by the error message
+ if proc in ['error']:
+ ctx.stack.append('preproc-error')
+
+ yield match.start(), Comment.Preproc, '#' + proc
+ ctx.pos = match.end()
+
+ tokens = {
+ 'root': [
+ include('spaces'),
+ include('meta'),
+ (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')),
+ (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')),
+ (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')),
+ (r'(?:extern|private)\b', Keyword.Declaration),
+ (r'(?:abstract)\b', Keyword.Declaration, 'abstract'),
+ (r'(?:class|interface)\b', Keyword.Declaration, 'class'),
+ (r'(?:enum)\b', Keyword.Declaration, 'enum'),
+ (r'(?:typedef)\b', Keyword.Declaration, 'typedef'),
+
+ # top-level expression
+ # although top-level expressions are not supported in Haxe, it is
+ # common to write them in web pages; the positive lookahead here
+ # prevents an infinite loop at the EOF
+ (r'(?=.)', Text, 'expr-statement'),
+ ],
+
+ # space/tab/comment/preproc
+ 'spaces': [
+ (r'\s+', Whitespace),
+ (r'//[^\n\r]*', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'(#)(if|elseif|else|end|error)\b', preproc_callback),
+ ],
+
+ 'string-single-interpol': [
+ (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')),
+ (r'\$\$', String.Escape),
+ (r'\$(?=' + ident + ')', String.Interpol, 'ident'),
+ include('string-single'),
+ ],
+
+ 'string-single': [
+ (r"'", String.Single, '#pop'),
+ (r'\\.', String.Escape),
+ (r'.', String.Single),
+ ],
+
+ 'string-double': [
+ (r'"', String.Double, '#pop'),
+ (r'\\.', String.Escape),
+ (r'.', String.Double),
+ ],
+
+ 'string-interpol-close': [
+ (r'\$'+ident, String.Interpol),
+ (r'\}', String.Interpol, '#pop'),
+ ],
+
+ 'package': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\.', Punctuation, 'import-ident'),
+ default('#pop'),
+ ],
+
+ 'import': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\*', Keyword), # wildcard import
+ (r'\.', Punctuation, 'import-ident'),
+ (r'in', Keyword.Namespace, 'ident'),
+ default('#pop'),
+ ],
+
+ 'import-ident': [
+ include('spaces'),
+ (r'\*', Keyword, '#pop'), # wildcard import
+ (ident, Name.Namespace, '#pop'),
+ ],
+
+ 'using': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\.', Punctuation, 'import-ident'),
+ default('#pop'),
+ ],
+
+ 'preproc-error': [
+ (r'\s+', Whitespace),
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+ default('#pop'),
+ ],
+
+ 'preproc-expr': [
+ (r'\s+', Whitespace),
+ (r'\!', Comment.Preproc),
+ (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')),
+
+ (ident, Comment.Preproc, '#pop'),
+
+ # Float
+ (r'\.[0-9]+', Number.Float),
+ (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float),
+ (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float),
+ (r'[0-9]+\.[0-9]+', Number.Float),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+
+ # String
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+ ],
+
+ 'preproc-parenthesis': [
+ (r'\s+', Whitespace),
+ (r'\)', Comment.Preproc, '#pop'),
+ default('preproc-expr-in-parenthesis'),
+ ],
+
+ 'preproc-expr-chain': [
+ (r'\s+', Whitespace),
+ (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')),
+ default('#pop'),
+ ],
+
+ # same as 'preproc-expr' but able to chain 'preproc-expr-chain'
+ 'preproc-expr-in-parenthesis': [
+ (r'\s+', Whitespace),
+ (r'\!', Comment.Preproc),
+ (r'\(', Comment.Preproc,
+ ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')),
+
+ (ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')),
+
+ # Float
+ (r'\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, ('#pop', 'preproc-expr-chain')),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+', Number.Integer, ('#pop', 'preproc-expr-chain')),
+
+ # String
+ (r"'", String.Single,
+ ('#pop', 'preproc-expr-chain', 'string-single')),
+ (r'"', String.Double,
+ ('#pop', 'preproc-expr-chain', 'string-double')),
+ ],
+
+ 'abstract': [
+ include('spaces'),
+ default(('#pop', 'abstract-body', 'abstract-relation',
+ 'abstract-opaque', 'type-param-constraint', 'type-name')),
+ ],
+
+ 'abstract-body': [
+ include('spaces'),
+ (r'\{', Punctuation, ('#pop', 'class-body')),
+ ],
+
+ 'abstract-opaque': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')),
+ default('#pop'),
+ ],
+
+ 'abstract-relation': [
+ include('spaces'),
+ (r'(?:to|from)', Keyword.Declaration, 'type'),
+ (r',', Punctuation),
+ default('#pop'),
+ ],
+
+ 'meta': [
+ include('spaces'),
+ (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')),
+ ],
+
+ # optional colon
+ 'meta-colon': [
+ include('spaces'),
+ (r':', Name.Decorator, '#pop'),
+ default('#pop'),
+ ],
+
+ # same as 'ident' but set token as Name.Decorator instead of Name
+ 'meta-ident': [
+ include('spaces'),
+ (ident, Name.Decorator, '#pop'),
+ ],
+
+ 'meta-body': [
+ include('spaces'),
+ (r'\(', Name.Decorator, ('#pop', 'meta-call')),
+ default('#pop'),
+ ],
+
+ 'meta-call': [
+ include('spaces'),
+ (r'\)', Name.Decorator, '#pop'),
+ default(('#pop', 'meta-call-sep', 'expr')),
+ ],
+
+ 'meta-call-sep': [
+ include('spaces'),
+ (r'\)', Name.Decorator, '#pop'),
+ (r',', Punctuation, ('#pop', 'meta-call')),
+ ],
+
+ 'typedef': [
+ include('spaces'),
+ default(('#pop', 'typedef-body', 'type-param-constraint',
+ 'type-name')),
+ ],
+
+ 'typedef-body': [
+ include('spaces'),
+ (r'=', Operator, ('#pop', 'optional-semicolon', 'type')),
+ ],
+
+ 'enum': [
+ include('spaces'),
+ default(('#pop', 'enum-body', 'bracket-open',
+ 'type-param-constraint', 'type-name')),
+ ],
+
+ 'enum-body': [
+ include('spaces'),
+ include('meta'),
+ (r'\}', Punctuation, '#pop'),
+ (ident_no_keyword, Name, ('enum-member', 'type-param-constraint')),
+ ],
+
+ 'enum-member': [
+ include('spaces'),
+ (r'\(', Punctuation,
+ ('#pop', 'semicolon', 'flag', 'function-param')),
+ default(('#pop', 'semicolon', 'flag')),
+ ],
+
+ 'class': [
+ include('spaces'),
+ default(('#pop', 'class-body', 'bracket-open', 'extends',
+ 'type-param-constraint', 'type-name')),
+ ],
+
+ 'extends': [
+ include('spaces'),
+ (r'(?:extends|implements)\b', Keyword.Declaration, 'type'),
+ (r',', Punctuation), # the comma is made optional here, since haxe2
+ # requires the comma but haxe3 does not allow it
+ default('#pop'),
+ ],
+
+ 'bracket-open': [
+ include('spaces'),
+ (r'\{', Punctuation, '#pop'),
+ ],
+
+ 'bracket-close': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+
+ 'class-body': [
+ include('spaces'),
+ include('meta'),
+ (r'\}', Punctuation, '#pop'),
+ (r'(?:static|public|private|override|dynamic|inline|macro)\b',
+ Keyword.Declaration),
+ default('class-member'),
+ ],
+
+ 'class-member': [
+ include('spaces'),
+ (r'(var)\b', Keyword.Declaration,
+ ('#pop', 'optional-semicolon', 'var')),
+ (r'(function)\b', Keyword.Declaration,
+ ('#pop', 'optional-semicolon', 'class-method')),
+ ],
+
+ # local function, anonymous or not
+ 'function-local': [
+ include('spaces'),
+ (ident_no_keyword, Name.Function,
+ ('#pop', 'optional-expr', 'flag', 'function-param',
+ 'parenthesis-open', 'type-param-constraint')),
+ default(('#pop', 'optional-expr', 'flag', 'function-param',
+ 'parenthesis-open', 'type-param-constraint')),
+ ],
+
+ 'optional-expr': [
+ include('spaces'),
+ include('expr'),
+ default('#pop'),
+ ],
+
+ 'class-method': [
+ include('spaces'),
+ (ident, Name.Function, ('#pop', 'optional-expr', 'flag',
+ 'function-param', 'parenthesis-open',
+ 'type-param-constraint')),
+ ],
+
+ # function arguments
+ 'function-param': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r'\?', Punctuation),
+ (ident_no_keyword, Name,
+ ('#pop', 'function-param-sep', 'assign', 'flag')),
+ ],
+
+ 'function-param-sep': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'function-param')),
+ ],
+
+ 'prop-get-set': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close',
+ 'prop-get-set-opt', 'comma', 'prop-get-set-opt')),
+ default('#pop'),
+ ],
+
+ 'prop-get-set-opt': [
+ include('spaces'),
+ (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'),
+ (ident_no_keyword, Text, '#pop'), # custom getter/setter
+ ],
+
+ 'expr-statement': [
+ include('spaces'),
+ # makes the semicolon optional here, to avoid checking whether
+ # the last token is a closing bracket or not
+ default(('#pop', 'optional-semicolon', 'expr')),
+ ],
+
+ 'expr': [
+ include('spaces'),
+ (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body',
+ 'meta-ident', 'meta-colon')),
+ (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator),
+ (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')),
+ (r'(?:static|public|private|override|dynamic|inline)\b',
+ Keyword.Declaration),
+ (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain',
+ 'function-local')),
+ (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')),
+ (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')),
+ (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')),
+ (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')),
+ (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')),
+ (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')),
+ (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')),
+ (r'(?:switch)\b', Keyword, ('#pop', 'switch')),
+ (r'(?:if)\b', Keyword, ('#pop', 'if')),
+ (r'(?:do)\b', Keyword, ('#pop', 'do')),
+ (r'(?:while)\b', Keyword, ('#pop', 'while')),
+ (r'(?:for)\b', Keyword, ('#pop', 'for')),
+ (r'(?:untyped|throw)\b', Keyword),
+ (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')),
+ (r'(?:macro)\b', Keyword, ('#pop', 'macro')),
+ (r'(?:continue|break)\b', Keyword, '#pop'),
+ (r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')),
+ (ident_no_keyword, Name, ('#pop', 'expr-chain')),
+
+ # Float
+ (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, ('#pop', 'expr-chain')),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')),
+ (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')),
+
+ # String
+ (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')),
+ (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')),
+
+ # EReg
+ (r'~/(\\\\|\\[^\\]|[^/\\\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')),
+
+ # Array
+ (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')),
+ ],
+
+ 'expr-chain': [
+ include('spaces'),
+ (r'(?:\+\+|\-\-)', Operator),
+ (binop, Operator, ('#pop', 'expr')),
+ (r'(?:in)\b', Keyword, ('#pop', 'expr')),
+ (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')),
+ (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)),
+ (r'\[', Punctuation, 'array-access'),
+ (r'\(', Punctuation, 'call'),
+ default('#pop'),
+ ],
+
+ # macro reification
+ 'macro': [
+ include('spaces'),
+ include('meta'),
+ (r':', Punctuation, ('#pop', 'type')),
+
+ (r'(?:extern|private)\b', Keyword.Declaration),
+ (r'(?:abstract)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'abstract')),
+ (r'(?:class|interface)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'macro-class')),
+ (r'(?:enum)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'enum')),
+ (r'(?:typedef)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'typedef')),
+
+ default(('#pop', 'expr')),
+ ],
+
+ 'macro-class': [
+ (r'\{', Punctuation, ('#pop', 'class-body')),
+ include('class')
+ ],
+
+ # cast can be written as "cast expr" or "cast(expr, type)"
+ 'cast': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close',
+ 'cast-type', 'expr')),
+ default(('#pop', 'expr')),
+ ],
+
+ # optionally give a type as the 2nd argument of cast()
+ 'cast-type': [
+ include('spaces'),
+ (r',', Punctuation, ('#pop', 'type')),
+ default('#pop'),
+ ],
+
+ 'catch': [
+ include('spaces'),
+ (r'(?:catch)\b', Keyword, ('expr', 'function-param',
+ 'parenthesis-open')),
+ default('#pop'),
+ ],
+
+ # do-while loop
+ 'do': [
+ include('spaces'),
+ default(('#pop', 'do-while', 'expr')),
+ ],
+
+ # the while after do
+ 'do-while': [
+ include('spaces'),
+ (r'(?:while)\b', Keyword, ('#pop', 'parenthesis',
+ 'parenthesis-open')),
+ ],
+
+ 'while': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
+ ],
+
+ 'for': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
+ ],
+
+ 'if': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr',
+ 'parenthesis')),
+ ],
+
+ 'else': [
+ include('spaces'),
+ (r'(?:else)\b', Keyword, ('#pop', 'expr')),
+ default('#pop'),
+ ],
+
+ 'switch': [
+ include('spaces'),
+ default(('#pop', 'switch-body', 'bracket-open', 'expr')),
+ ],
+
+ 'switch-body': [
+ include('spaces'),
+ (r'(?:case|default)\b', Keyword, ('case-block', 'case')),
+ (r'\}', Punctuation, '#pop'),
+ ],
+
+ 'case': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ default(('#pop', 'case-sep', 'case-guard', 'expr')),
+ ],
+
+ 'case-sep': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'case')),
+ ],
+
+ 'case-guard': [
+ include('spaces'),
+ (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')),
+ default('#pop'),
+ ],
+
+ # optional multiple expr under a case
+ 'case-block': [
+ include('spaces'),
+ (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'),
+ default('#pop'),
+ ],
+
+ 'new': [
+ include('spaces'),
+ default(('#pop', 'call', 'parenthesis-open', 'type')),
+ ],
+
+ 'array-decl': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ default(('#pop', 'array-decl-sep', 'expr')),
+ ],
+
+ 'array-decl-sep': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'array-decl')),
+ ],
+
+ 'array-access': [
+ include('spaces'),
+ default(('#pop', 'array-access-close', 'expr')),
+ ],
+
+ 'array-access-close': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ ],
+
+ 'comma': [
+ include('spaces'),
+ (r',', Punctuation, '#pop'),
+ ],
+
+ 'colon': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ ],
+
+ 'semicolon': [
+ include('spaces'),
+ (r';', Punctuation, '#pop'),
+ ],
+
+ 'optional-semicolon': [
+ include('spaces'),
+ (r';', Punctuation, '#pop'),
+ default('#pop'),
+ ],
+
+ # identifier that CAN be a Haxe keyword
+ 'ident': [
+ include('spaces'),
+ (ident, Name, '#pop'),
+ ],
+
+ 'dollar': [
+ include('spaces'),
+ (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket-close', 'expr')),
+ default(('#pop', 'expr-chain')),
+ ],
+
+ 'type-name': [
+ include('spaces'),
+ (typeid, Name, '#pop'),
+ ],
+
+ 'type-full-name': [
+ include('spaces'),
+ (r'\.', Punctuation, 'ident'),
+ default('#pop'),
+ ],
+
+ 'type': [
+ include('spaces'),
+ (r'\?', Punctuation),
+ (ident, Name, ('#pop', 'type-check', 'type-full-name')),
+ (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')),
+ (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')),
+ ],
+
+ 'type-parenthesis': [
+ include('spaces'),
+ default(('#pop', 'parenthesis-close', 'type')),
+ ],
+
+ 'type-check': [
+ include('spaces'),
+ (r'->', Punctuation, ('#pop', 'type')),
+ (r'<(?!=)', Punctuation, 'type-param'),
+ default('#pop'),
+ ],
+
+ 'type-struct': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r'\?', Punctuation),
+ (r'>', Punctuation, ('comma', 'type')),
+ (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')),
+ include('class-body'),
+ ],
+
+ 'type-struct-sep': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'type-struct')),
+ ],
+
+ # type-param can be a normal type or a constant literal...
+ 'type-param-type': [
+ # Float
+ (r'\.[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, '#pop'),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'),
+ (r'[0-9]+', Number.Integer, '#pop'),
+
+ # String
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+
+ # EReg
+ (r'~/(\\\\|\\[^\\]|[^/\\\n])*/[gim]*', String.Regex, '#pop'),
+
+ # Array
+ (r'\[', Operator, ('#pop', 'array-decl')),
+
+ include('type'),
+ ],
+
+ # type-param part of a type
+ # ie. the <A,B> path in Map<A,B>
+ 'type-param': [
+ include('spaces'),
+ default(('#pop', 'type-param-sep', 'type-param-type')),
+ ],
+
+ 'type-param-sep': [
+ include('spaces'),
+ (r'>', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'type-param')),
+ ],
+
+ # optional type-param that may include constraint
+ # ie. <T:Constraint, T2:(ConstraintA,ConstraintB)>
+ 'type-param-constraint': [
+ include('spaces'),
+ (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep',
+ 'type-param-constraint-flag', 'type-name')),
+ default('#pop'),
+ ],
+
+ 'type-param-constraint-sep': [
+ include('spaces'),
+ (r'>', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'type-param-constraint-sep',
+ 'type-param-constraint-flag', 'type-name')),
+ ],
+
+ # the optional constraint inside type-param
+ 'type-param-constraint-flag': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')),
+ default('#pop'),
+ ],
+
+ 'type-param-constraint-flag-type': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep',
+ 'type')),
+ default(('#pop', 'type')),
+ ],
+
+ 'type-param-constraint-flag-type-sep': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, 'type'),
+ ],
+
+ # a parenthesized expr that contains exactly one expr
+ 'parenthesis': [
+ include('spaces'),
+ default(('#pop', 'parenthesis-close', 'flag', 'expr')),
+ ],
+
+ 'parenthesis-open': [
+ include('spaces'),
+ (r'\(', Punctuation, '#pop'),
+ ],
+
+ 'parenthesis-close': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ ],
+
+ 'var': [
+ include('spaces'),
+ (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag', 'prop-get-set')),
+ ],
+
+ # optional more var decl.
+ 'var-sep': [
+ include('spaces'),
+ (r',', Punctuation, ('#pop', 'var')),
+ default('#pop'),
+ ],
+
+ # optional assignment
+ 'assign': [
+ include('spaces'),
+ (r'=', Operator, ('#pop', 'expr')),
+ default('#pop'),
+ ],
+
+ # optional type flag
+ 'flag': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'type')),
+ default('#pop'),
+ ],
+
+ # colon as part of a ternary operator (?:)
+ 'ternary': [
+ include('spaces'),
+ (r':', Operator, '#pop'),
+ ],
+
+ # function call
+ 'call': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ default(('#pop', 'call-sep', 'expr')),
+ ],
+
+ # after a call param
+ 'call-sep': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'call')),
+ ],
+
+ # bracket can be block or object
+ 'bracket': [
+ include('spaces'),
+ (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name,
+ ('#pop', 'bracket-check')),
+ (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')),
+ (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')),
+ default(('#pop', 'block')),
+ ],
+
+ 'bracket-check': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'object-sep', 'expr')), # is object
+ default(('#pop', 'block', 'optional-semicolon', 'expr-chain')), # is block
+ ],
+
+ # code block
+ 'block': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ default('expr-statement'),
+ ],
+
+ # object in key-value pairs
+ 'object': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ default(('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string'))
+ ],
+
+ # a key of an object
+ 'ident-or-string': [
+ include('spaces'),
+ (ident_no_keyword, Name, '#pop'),
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+ ],
+
+ # after a key-value pair in object
+ 'object-sep': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'object')),
+ ],
+
+
+
+ }
+
+ def analyse_text(text):
+ if re.match(r'\w+\s*:\s*\w', text):
+ return 0.3
+
+
+class HxmlLexer(RegexLexer):
+ """
+ Lexer for Haxe build files.
+
+ .. versionadded:: 1.6
+ """
+ name = 'Hxml'
+ url = 'https://haxe.org/manual/compiler-usage-hxml.html'
+ aliases = ['haxeml', 'hxml']
+ filenames = ['*.hxml']
+
+ tokens = {
+ 'root': [
+ # Separator
+ (r'(--)(next)', bygroups(Punctuation, Generic.Heading)),
+ # Compiler switches with one dash
+ (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)),
+ # Compiler switches with two dashes
+ (r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|'
+ r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)),
+ # Targets and other options that take an argument
+ (r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|'
+ r'cp|cmd)( +)(.+)',
+ bygroups(Punctuation, Keyword, Whitespace, String)),
+ # Options that take only numerical arguments
+ (r'(-)(swf-version)( +)(\d+)',
+ bygroups(Punctuation, Keyword, Whitespace, Number.Integer)),
+ # An Option that defines the size, the fps and the background
+ # color of an flash movie
+ (r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})',
+ bygroups(Punctuation, Keyword, Whitespace, Number.Integer,
+ Punctuation, Number.Integer, Punctuation, Number.Integer,
+ Punctuation, Number.Hex)),
+ # options with two dashes that takes arguments
+ (r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)'
+ r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)),
+ # Single line comment, multiline ones are not allowed.
+ (r'#.*', Comment.Single)
+ ]
+ }
diff --git a/pygments/lexers/hdl.py b/pygments/lexers/hdl.py
new file mode 100644
index 0000000..c3be3a5
--- /dev/null
+++ b/pygments/lexers/hdl.py
@@ -0,0 +1,465 @@
+"""
+ pygments.lexers.hdl
+ ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for hardware description languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, include, using, this, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
+
+
+class VerilogLexer(RegexLexer):
+ """
+ For verilog source code with preprocessor directives.
+
+ .. versionadded:: 1.4
+ """
+ name = 'verilog'
+ aliases = ['verilog', 'v']
+ filenames = ['*.v']
+ mimetypes = ['text/x-verilog']
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
+
+ tokens = {
+ 'root': [
+ (r'^\s*`define', Comment.Preproc, 'macro'),
+ (r'\s+', Whitespace),
+ (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
+ (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+ (r'[{}#@]', Punctuation),
+ (r'L?"', String, 'string'),
+ (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
+ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+ (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
+ (r'([0-9]+)|(\'b)[01]+', Number.Bin),
+ (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
+ (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
+ (r'\'[01xz]', Number),
+ (r'\d+[Ll]?', Number.Integer),
+ (r'[~!%^&*+=|?:<>/-]', Operator),
+ (r'[()\[\],.;\']', Punctuation),
+ (r'`[a-zA-Z_]\w*', Name.Constant),
+
+ (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text)),
+ (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text),
+ 'import'),
+
+ (words((
+ 'always', 'always_comb', 'always_ff', 'always_latch', 'and',
+ 'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
+ 'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
+ 'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
+ 'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
+ 'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
+ 'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
+ 'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
+ 'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
+ 'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
+ 'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
+ 'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
+ 'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
+ 'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
+ 'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
+ 'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
+ 'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
+ Keyword),
+
+ (words((
+ 'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
+ 'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
+ 'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
+ 'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
+ 'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
+ 'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
+ 'undef'), prefix=r'`', suffix=r'\b'),
+ Comment.Preproc),
+
+ (words((
+ 'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
+ 'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
+ 'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
+ 'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
+ 'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
+ 'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
+ 'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
+ 'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
+ prefix=r'\$', suffix=r'\b'),
+ Name.Builtin),
+
+ (words((
+ 'byte', 'shortint', 'int', 'longint', 'integer', 'time',
+ 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
+ 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor'
+ 'shortreal', 'real', 'realtime'), suffix=r'\b'),
+ Keyword.Type),
+ (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
+ (r'\$?[a-zA-Z_]\w*', Name),
+ (r'\\(\S+)', Name),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ 'macro': [
+ (r'[^/\n]+', Comment.Preproc),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'//.*?\n', Comment.Single, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Whitespace, '#pop'),
+ ],
+ 'import': [
+ (r'[\w:]+\*?', Name.Namespace, '#pop')
+ ]
+ }
+
+ def analyse_text(text):
+ """Verilog code will use one of reg/wire/assign for sure, and that
+ is not common elsewhere."""
+ result = 0
+ if 'reg' in text:
+ result += 0.1
+ if 'wire' in text:
+ result += 0.1
+ if 'assign' in text:
+ result += 0.1
+
+ return result
+
+
+class SystemVerilogLexer(RegexLexer):
+    """
+    Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
+    1800-2009 standard.
+
+    .. versionadded:: 1.5
+    """
+    name = 'systemverilog'
+    aliases = ['systemverilog', 'sv']
+    filenames = ['*.sv', '*.svh']
+    mimetypes = ['text/x-systemverilog']
+
+    #: optional Comment or Whitespace
+    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
+
+    # NOTE(review): despite the docstring this class does not inherit from
+    # VerilogLexer; its rule table is fully self-contained.
+    tokens = {
+        'root': [
+            # `define switches into the 'macro' state until end of line
+            (r'^(\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'),
+            (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)),
+            (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'),
+
+            (r'\s+', Whitespace),
+            (r'(\\)(\n)', bygroups(String.Escape, Whitespace)),  # line continuation
+            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+            (r'[{}#@]', Punctuation),
+            (r'L?"', String, 'string'),
+            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
+            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+
+            # sized/based literals, e.g. 4'b10_xz — optional size, optional
+            # signedness [sS], then base letter and digits (with _ x z ?)
+            (r'([1-9][_0-9]*)?\s*\'[sS]?[bB]\s*[xXzZ?01][_xXzZ?01]*',
+             Number.Bin),
+            (r'([1-9][_0-9]*)?\s*\'[sS]?[oO]\s*[xXzZ?0-7][_xXzZ?0-7]*',
+             Number.Oct),
+            (r'([1-9][_0-9]*)?\s*\'[sS]?[dD]\s*[xXzZ?0-9][_xXzZ?0-9]*',
+             Number.Integer),
+            (r'([1-9][_0-9]*)?\s*\'[sS]?[hH]\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*',
+             Number.Hex),
+
+            (r'\'[01xXzZ]', Number),
+            (r'[0-9][_0-9]*', Number.Integer),
+
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (words(('inside', 'dist'), suffix=r'\b'), Operator.Word),
+
+            (r'[()\[\],.;\'$]', Punctuation),
+            (r'`[a-zA-Z_]\w*', Name.Constant),
+
+            (words((
+                'accept_on', 'alias', 'always', 'always_comb', 'always_ff',
+                'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic',
+                'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf',
+                'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell',
+                'checker', 'clocking', 'cmos', 'config',
+                'constraint', 'context', 'continue', 'cover', 'covergroup',
+                'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design',
+                'disable', 'do', 'edge', 'else', 'end', 'endcase',
+                'endchecker', 'endclocking', 'endconfig', 'endfunction',
+                'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage',
+                'endprimitive', 'endprogram', 'endproperty', 'endsequence',
+                'endspecify', 'endtable', 'endtask', 'enum', 'eventually',
+                'expect', 'export', 'extern', 'final', 'first_match',
+                'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function',
+                'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff',
+                'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'implements', 'import',
+                'incdir', 'include', 'initial', 'inout', 'input',
+                'instance', 'interconnect', 'interface', 'intersect', 'join',
+                'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
+                'local', 'localparam', 'macromodule', 'matches',
+                'medium', 'modport', 'module', 'nand', 'negedge', 'nettype', 'new', 'nexttime',
+                'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null',
+                'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge',
+                'primitive', 'priority', 'program', 'property', 'protected', 'pull0',
+                'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect',
+                'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase',
+                'randsequence', 'rcmos', 'ref',
+                'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
+                'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually',
+                's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence',
+                'showcancelled', 'small', 'soft', 'solve',
+                'specify', 'specparam', 'static', 'strong', 'strong0',
+                'strong1', 'struct', 'super', 'sync_accept_on',
+                'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
+                'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1',
+                'typedef', 'union', 'unique', 'unique0', 'until',
+                'until_with', 'untyped', 'use', 'vectored',
+                'virtual', 'wait', 'wait_order', 'weak', 'weak0',
+                'weak1', 'while', 'wildcard', 'with', 'within',
+                'xnor', 'xor'),
+                suffix=r'\b'),
+             Keyword),
+
+            # class/extends/endclass get their own rules so the class name is
+            # tagged Name.Class rather than plain Name
+            (r'(class)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
+            (r'(extends)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
+            (r'(endclass\b)(?:(\s*)(:)(\s*)([a-zA-Z_]\w*))?',
+             bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)),
+
+            (words((
+                # Variable types
+                'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer',
+                'logic', 'longint', 'real', 'realtime', 'reg', 'shortint',
+                'shortreal', 'signed', 'string', 'time', 'type', 'unsigned',
+                'var', 'void',
+                # Net types
+                'supply0', 'supply1', 'tri', 'triand', 'trior', 'trireg',
+                'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'),
+                suffix=r'\b'),
+             Keyword.Type),
+
+            (words((
+                '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine',
+                '`default_nettype', '`define', '`else', '`elsif', '`end_keywords',
+                '`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include',
+                '`line', '`nounconnected_drive', '`pragma', '`resetall',
+                '`timescale', '`unconnected_drive', '`undef', '`undefineall'),
+                suffix=r'\b'),
+             Comment.Preproc),
+
+            (words((
+                # Simulation control tasks (20.2)
+                '$exit', '$finish', '$stop',
+                # Simulation time functions (20.3)
+                '$realtime', '$stime', '$time',
+                # Timescale tasks (20.4)
+                '$printtimescale', '$timeformat',
+                # Conversion functions
+                '$bitstoreal', '$bitstoshortreal', '$cast', '$itor',
+                '$realtobits', '$rtoi', '$shortrealtobits', '$signed',
+                '$unsigned',
+                # Data query functions (20.6)
+                '$bits', '$isunbounded', '$typename',
+                # Array query functions (20.7)
+                '$dimensions', '$high', '$increment', '$left', '$low', '$right',
+                '$size', '$unpacked_dimensions',
+                # Math functions (20.8)
+                '$acos', '$acosh', '$asin', '$asinh', '$atan', '$atan2',
+                '$atanh', '$ceil', '$clog2', '$cos', '$cosh', '$exp', '$floor',
+                '$hypot', '$ln', '$log10', '$pow', '$sin', '$sinh', '$sqrt',
+                '$tan', '$tanh',
+                # Bit vector system functions (20.9)
+                '$countbits', '$countones', '$isunknown', '$onehot', '$onehot0',
+                # Severity tasks (20.10)
+                '$info', '$error', '$fatal', '$warning',
+                # Assertion control tasks (20.12)
+                '$assertcontrol', '$assertfailoff', '$assertfailon',
+                '$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton',
+                '$assertpassoff', '$assertpasson', '$assertvacuousoff',
+                # Sampled value system functions (20.13)
+                '$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk',
+                '$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk',
+                '$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable',
+                '$stable_gclk', '$steady_gclk',
+                # Coverage control functions (20.14)
+                '$coverage_control', '$coverage_get', '$coverage_get_max',
+                '$coverage_merge', '$coverage_save', '$get_coverage',
+                '$load_coverage_db', '$set_coverage_db_name',
+                # Probabilistic distribution functions (20.15)
+                '$dist_chi_square', '$dist_erlang', '$dist_exponential',
+                '$dist_normal', '$dist_poisson', '$dist_t', '$dist_uniform',
+                '$random',
+                # Stochastic analysis tasks and functions (20.16)
+                '$q_add', '$q_exam', '$q_full', '$q_initialize', '$q_remove',
+                # PLA modeling tasks (20.17)
+                '$async$and$array', '$async$and$plane', '$async$nand$array',
+                '$async$nand$plane', '$async$nor$array', '$async$nor$plane',
+                '$async$or$array', '$async$or$plane', '$sync$and$array',
+                '$sync$and$plane', '$sync$nand$array', '$sync$nand$plane',
+                '$sync$nor$array', '$sync$nor$plane', '$sync$or$array',
+                '$sync$or$plane',
+                # Miscellaneous tasks and functions (20.18)
+                '$system',
+                # Display tasks (21.2)
+                '$display', '$displayb', '$displayh', '$displayo', '$monitor',
+                '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
+                '$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo',
+                '$write', '$writeb', '$writeh', '$writeo',
+                # File I/O tasks and functions (21.3)
+                '$fclose', '$fdisplay', '$fdisplayb', '$fdisplayh',
+                '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', '$fgets',
+                '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen',
+                '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb',
+                '$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb',
+                '$fwriteh', '$fwriteo', '$rewind', '$sformat', '$sformatf',
+                '$sscanf', '$swrite', '$swriteb', '$swriteh', '$swriteo',
+                '$ungetc',
+                # Memory load tasks (21.4)
+                '$readmemb', '$readmemh',
+                # Memory dump tasks (21.5)
+                '$writememb', '$writememh',
+                # Command line input (21.6)
+                '$test$plusargs', '$value$plusargs',
+                # VCD tasks (21.7)
+                '$dumpall', '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff',
+                '$dumpon', '$dumpports', '$dumpportsall', '$dumpportsflush',
+                '$dumpportslimit', '$dumpportsoff', '$dumpportson', '$dumpvars',
+            ), suffix=r'\b'),
+             Name.Builtin),
+
+            # fallbacks: labels, then any (possibly $-prefixed) identifier,
+            # then escaped identifiers (backslash up to next whitespace)
+            (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
+            (r'\$?[a-zA-Z_]\w*', Name),
+            (r'\\(\S+)', Name),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+            (r'[^\\"\n]+', String),  # all other characters
+            (r'(\\)(\n)', bygroups(String.Escape, Whitespace)),  # line continuation
+            (r'\\', String),  # stray backslash
+        ],
+        'macro': [
+            (r'[^/\n]+', Comment.Preproc),
+            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+            # NOTE(review): VerilogLexer's macro state uses r'//.*?\n' here;
+            # the '$' form relies on RegexLexer's default re.MULTILINE flag.
+            (r'//.*?$', Comment.Single, '#pop'),
+            (r'/', Comment.Preproc),
+            (r'(?<=\\)\n', Comment.Preproc),
+            (r'\n', Whitespace, '#pop'),
+        ],
+        'import': [
+            (r'[\w:]+\*?', Name.Namespace, '#pop')
+        ]
+    }
+
+
+class VhdlLexer(RegexLexer):
+    """
+    For VHDL source code.
+
+    VHDL is case-insensitive, hence the re.IGNORECASE flag; all character
+    classes below are therefore written lowercase only.
+
+    .. versionadded:: 1.5
+    """
+    name = 'vhdl'
+    aliases = ['vhdl']
+    filenames = ['*.vhdl', '*.vhd']
+    mimetypes = ['text/x-vhdl']
+    flags = re.MULTILINE | re.IGNORECASE
+
+    tokens = {
+        'root': [
+            (r'\s+', Whitespace),
+            (r'(\\)(\n)', bygroups(String.Escape, Whitespace)),  # line continuation
+            (r'--.*?$', Comment.Single),
+            (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),  # std_logic literals
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (r"'[a-z_]\w*", Name.Attribute),  # e.g. 'event, 'range
+            (r'[()\[\],.;\']', Punctuation),
+            (r'"[^\n\\"]*"', String),
+
+            (r'(library)(\s+)([a-z_]\w*)',
+             bygroups(Keyword, Whitespace, Name.Namespace)),
+            (r'(use)(\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)),
+            (r'(use)(\s+)([a-z_][\w.]*\.)(all)',
+             bygroups(Keyword, Whitespace, Name.Namespace, Keyword)),
+            (r'(use)(\s+)([a-z_][\w.]*)',
+             bygroups(Keyword, Whitespace, Name.Namespace)),
+            (r'(std|ieee)(\.[a-z_]\w*)',
+             bygroups(Name.Namespace, Name.Namespace)),
+            (words(('std', 'ieee', 'work'), suffix=r'\b'),
+             Name.Namespace),
+            (r'(entity|component)(\s+)([a-z_]\w*)',
+             bygroups(Keyword, Whitespace, Name.Class)),
+            (r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
+             r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
+             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace,
+                      Name.Class, Whitespace, Keyword)),
+            # labelled process/for, e.g. "clkgen: process"
+            (r'([a-z_]\w*)(:)(\s+)(process|for)',
+             bygroups(Name.Class, Operator, Whitespace, Keyword)),
+            # 'end <name>;' — using(this) re-lexes 'end' with this lexer
+            (r'(end)(\s+)', bygroups(using(this), Whitespace), 'endblock'),
+
+            include('types'),
+            include('keywords'),
+            include('numbers'),
+
+            (r'[a-z_]\w*', Name),
+        ],
+        'endblock': [
+            include('keywords'),
+            (r'[a-z_]\w*', Name.Class),
+            (r'\s+', Whitespace),
+            (r';', Punctuation, '#pop'),
+        ],
+        'types': [
+            (words((
+                'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
+                'delay_length', 'natural', 'positive', 'string', 'bit_vector',
+                'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
+                'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
+             Keyword.Type),
+        ],
+        'keywords': [
+            (words((
+                'abs', 'access', 'after', 'alias', 'all', 'and',
+                'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
+                'body', 'buffer', 'bus', 'case', 'component', 'configuration',
+                'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
+                'entity', 'exit', 'file', 'for', 'function', 'generate',
+                'generic', 'group', 'guarded', 'if', 'impure', 'in',
+                'inertial', 'inout', 'is', 'label', 'library', 'linkage',
+                'literal', 'loop', 'map', 'mod', 'nand', 'new',
+                'next', 'nor', 'not', 'null', 'of', 'on',
+                'open', 'or', 'others', 'out', 'package', 'port',
+                'postponed', 'procedure', 'process', 'pure', 'range', 'record',
+                'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
+                'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
+                'srl', 'subtype', 'then', 'to', 'transport', 'type',
+                'units', 'until', 'use', 'variable', 'wait', 'when',
+                'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
+             Keyword),
+        ],
+        'numbers': [
+            (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),  # based literal, e.g. 16#ff#
+            (r'\d+', Number.Integer),
+            (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
+            (r'X"[0-9a-f_]+"', Number.Hex),
+            (r'O"[0-7_]+"', Number.Oct),
+            (r'B"[01_]+"', Number.Bin),
+        ],
+    }
diff --git a/pygments/lexers/hexdump.py b/pygments/lexers/hexdump.py
new file mode 100644
index 0000000..b766d1d
--- /dev/null
+++ b/pygments/lexers/hexdump.py
@@ -0,0 +1,102 @@
+"""
+ pygments.lexers.hexdump
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for hexadecimal dumps.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import Name, Number, String, Punctuation, Whitespace
+
+__all__ = ['HexdumpLexer']
+
+
+class HexdumpLexer(RegexLexer):
+    """
+    For typical hex dump output formats by the UNIX and GNU/Linux tools ``hexdump``,
+    ``hd``, ``hexcat``, ``od`` and ``xxd``, and the DOS tool ``DEBUG``. For example:
+
+    .. sourcecode:: hexdump
+
+        00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  |.ELF............|
+        00000010  02 00 3e 00 01 00 00 00  c5 48 40 00 00 00 00 00  |..>......H@.....|
+
+    The specific supported formats are the outputs of:
+
+    * ``hexdump FILE``
+    * ``hexdump -C FILE`` -- the `canonical` format used in the example.
+    * ``hd FILE`` -- same as ``hexdump -C FILE``.
+    * ``hexcat FILE``
+    * ``od -t x1z FILE``
+    * ``xxd FILE``
+    * ``DEBUG.EXE FILE.COM`` and entering ``d`` to the prompt.
+
+    .. versionadded:: 2.1
+    """
+    name = 'Hexdump'
+    aliases = ['hexdump']
+
+    # One "hex digit" as matched by this lexer.
+    # NOTE(review): this is [0-9A-Ha-h], not [0-9A-Fa-f] -- G and H are not
+    # hexadecimal digits.  Confirm whether the wider class is intentional
+    # leniency before tightening it.
+    hd = r'[0-9A-Ha-h]'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            include('offset'),
+            # DEBUG.EXE style byte pair separated by '-' at mid-line
+            (r'('+hd+r'{2})(\-)('+hd+r'{2})',
+             bygroups(Number.Hex, Punctuation, Number.Hex)),
+            (hd+r'{2}', Number.Hex),
+            # First ASCII column seen decides the dialect: '>…<' (hexcat),
+            # '|…|' (hexdump -C / hd / xxd), or bare text (od-style); the
+            # matching state then handles all later lines of the dump.
+            (r'(\s{2,3})(\>)(.{16})(\<)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation), 'bracket-strings'),
+            (r'(\s{2,3})(\|)(.{16})(\|)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation), 'piped-strings'),
+            (r'(\s{2,3})(\>)(.{1,15})(\<)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation)),
+            (r'(\s{2,3})(\|)(.{1,15})(\|)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation)),
+            (r'(\s{2,3})(.{1,15})$', bygroups(Whitespace, String)),
+            (r'(\s{2,3})(.{16}|.{20})$', bygroups(Whitespace, String), 'nonpiped-strings'),
+            (r'\s', Whitespace),
+            (r'^\*', Punctuation),  # '*' marks repeated lines in hexdump output
+        ],
+        'offset': [
+            # offset followed by ':' (e.g. xxd) switches to offset-mode
+            (r'^('+hd+'+)(:)', bygroups(Name.Label, Punctuation), 'offset-mode'),
+            (r'^'+hd+'+', Name.Label),
+        ],
+        'offset-mode': [
+            (r'\s', Whitespace, '#pop'),
+            (hd+'+', Name.Label),
+            (r':', Punctuation)
+        ],
+        'piped-strings': [
+            (r'\n', Whitespace),
+            include('offset'),
+            (hd+r'{2}', Number.Hex),
+            (r'(\s{2,3})(\|)(.{1,16})(\|)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation)),
+            (r'\s', Whitespace),
+            (r'^\*', Punctuation),
+        ],
+        'bracket-strings': [
+            (r'\n', Whitespace),
+            include('offset'),
+            (hd+r'{2}', Number.Hex),
+            (r'(\s{2,3})(\>)(.{1,16})(\<)$',
+             bygroups(Whitespace, Punctuation, String, Punctuation)),
+            (r'\s', Whitespace),
+            (r'^\*', Punctuation),
+        ],
+        'nonpiped-strings': [
+            (r'\n', Whitespace),
+            include('offset'),
+            (r'('+hd+r'{2})(\-)('+hd+r'{2})',
+             bygroups(Number.Hex, Punctuation, Number.Hex)),
+            (hd+r'{2}', Number.Hex),
+            (r'(\s{19,})(.{1,20}?)$', bygroups(Whitespace, String)),
+            (r'(\s{2,3})(.{1,20})$', bygroups(Whitespace, String)),
+            (r'\s', Whitespace),
+            (r'^\*', Punctuation),
+        ],
+    }
diff --git a/pygments/lexers/html.py b/pygments/lexers/html.py
new file mode 100644
index 0000000..f2e0463
--- /dev/null
+++ b/pygments/lexers/html.py
@@ -0,0 +1,605 @@
+"""
+ pygments.lexers.html
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for HTML, XML and related markup.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
+ default, using
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Punctuation, Whitespace
+from pygments.util import looks_like_xml, html_doctype_matches
+
+from pygments.lexers.javascript import JavascriptLexer
+from pygments.lexers.jvm import ScalaLexer
+from pygments.lexers.css import CssLexer, _indentation, _starts_block
+from pygments.lexers.ruby import RubyLexer
+
+__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
+ 'ScamlLexer', 'PugLexer']
+
+
+class HtmlLexer(RegexLexer):
+    """
+    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
+    by the appropriate lexer.
+    """
+
+    name = 'HTML'
+    url = 'https://html.spec.whatwg.org/'
+    aliases = ['html']
+    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
+    mimetypes = ['text/html', 'application/xhtml+xml']
+
+    # DOTALL so comments/CDATA spanning lines match in one rule
+    flags = re.IGNORECASE | re.DOTALL
+    tokens = {
+        'root': [
+            ('[^<&]+', Text),
+            (r'&\S*?;', Name.Entity),
+            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
+            (r'<!--.*?-->', Comment.Multiline),
+            (r'<\?.*?\?>', Comment.Preproc),  # processing instruction
+            ('<![^>]*>', Comment.Preproc),    # doctype etc.
+            # <script>/<style> push two states: 'tag' finishes the open tag,
+            # then the *-content state delegates the body to the sub-lexer
+            (r'(<)(\s*)(script)(\s*)',
+             bygroups(Punctuation, Text, Name.Tag, Text),
+             ('script-content', 'tag')),
+            (r'(<)(\s*)(style)(\s*)',
+             bygroups(Punctuation, Text, Name.Tag, Text),
+             ('style-content', 'tag')),
+            # note: this allows tag names not used in HTML like <x:with-dash>,
+            # this is to support yet-unknown template engines and the like
+            (r'(<)(\s*)([\w:.-]+)',
+             bygroups(Punctuation, Text, Name.Tag), 'tag'),
+            (r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
+             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
+                      Punctuation)),
+        ],
+        'tag': [
+            (r'\s+', Text),
+            (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
+             'attr'),
+            (r'[\w:-]+', Name.Attribute),
+            (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
+        ],
+        'script-content': [
+            (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
+             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
+                      Punctuation), '#pop'),
+            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
+            # fallback cases for when there is no closing script tag
+            # first look for newline and then go back into root state
+            # if that fails just read the rest of the file
+            # this is similar to the error handling logic in lexer.py
+            (r'.+?\n', using(JavascriptLexer), '#pop'),
+            (r'.+', using(JavascriptLexer), '#pop'),
+        ],
+        'style-content': [
+            (r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
+             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
+                      Punctuation),'#pop'),
+            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
+            # fallback cases for when there is no closing style tag
+            # first look for newline and then go back into root state
+            # if that fails just read the rest of the file
+            # this is similar to the error handling logic in lexer.py
+            (r'.+?\n', using(CssLexer), '#pop'),
+            (r'.+', using(CssLexer), '#pop'),
+        ],
+        'attr': [
+            ('".*?"', String, '#pop'),
+            ("'.*?'", String, '#pop'),
+            (r'[^\s>]+', String, '#pop'),  # unquoted attribute value
+        ],
+    }
+
+    def analyse_text(text):
+        # Returns None (no claim) when the doctype does not look like HTML.
+        if html_doctype_matches(text):
+            return 0.5
+
+
+class DtdLexer(RegexLexer):
+    """
+    A lexer for DTDs (Document Type Definitions).
+
+    Each ``<!ELEMENT``/``<!ATTLIST``/``<!ENTITY``/``<!NOTATION`` declaration
+    pushes its own state until the closing ``>``.
+
+    .. versionadded:: 1.5
+    """
+
+    flags = re.MULTILINE | re.DOTALL
+
+    name = 'DTD'
+    aliases = ['dtd']
+    filenames = ['*.dtd']
+    mimetypes = ['application/xml-dtd']
+
+    tokens = {
+        'root': [
+            include('common'),
+
+            (r'(<!ELEMENT)(\s+)(\S+)',
+             bygroups(Keyword, Text, Name.Tag), 'element'),
+            (r'(<!ATTLIST)(\s+)(\S+)',
+             bygroups(Keyword, Text, Name.Tag), 'attlist'),
+            (r'(<!ENTITY)(\s+)(\S+)',
+             bygroups(Keyword, Text, Name.Entity), 'entity'),
+            (r'(<!NOTATION)(\s+)(\S+)',
+             bygroups(Keyword, Text, Name.Tag), 'notation'),
+            (r'(<!\[)([^\[\s]+)(\s*)(\[)',  # conditional sections
+             bygroups(Keyword, Name.Entity, Text, Keyword)),
+
+            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
+             bygroups(Keyword, Text, Name.Tag)),
+            (r'PUBLIC|SYSTEM', Keyword.Constant),
+            (r'[\[\]>]', Keyword),
+        ],
+
+        # shared between all declaration states
+        'common': [
+            (r'\s+', Text),
+            (r'(%|&)[^;]*;', Name.Entity),  # parameter/general entity refs
+            ('<!--', Comment, 'comment'),
+            (r'[(|)*,?+]', Operator),       # content-model operators
+            (r'"[^"]*"', String.Double),
+            (r'\'[^\']*\'', String.Single),
+        ],
+
+        'comment': [
+            ('[^-]+', Comment),
+            ('-->', Comment, '#pop'),
+            ('-', Comment),
+        ],
+
+        'element': [
+            include('common'),
+            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
+            (r'[^>\s|()?+*,]+', Name.Tag),
+            (r'>', Keyword, '#pop'),
+        ],
+
+        'attlist': [
+            include('common'),
+            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
+             Keyword.Constant),
+            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
+            (r'xml:space|xml:lang', Keyword.Reserved),
+            (r'[^>\s|()?+*,]+', Name.Attribute),
+            (r'>', Keyword, '#pop'),
+        ],
+
+        'entity': [
+            include('common'),
+            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
+            (r'[^>\s|()?+*,]+', Name.Entity),
+            (r'>', Keyword, '#pop'),
+        ],
+
+        'notation': [
+            include('common'),
+            (r'SYSTEM|PUBLIC', Keyword.Constant),
+            (r'[^>\s|()?+*,]+', Name.Attribute),
+            (r'>', Keyword, '#pop'),
+        ],
+    }
+
+    def analyse_text(text):
+        # Claim DTD only for non-XML text containing DTD declarations, so
+        # inline DTDs inside XML documents still go to the XML lexer.
+        if not looks_like_xml(text) and \
+                ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
+            return 0.8
+
+
+class XmlLexer(RegexLexer):
+    """
+    Generic lexer for XML (eXtensible Markup Language).
+    """
+
+    flags = re.MULTILINE | re.DOTALL
+
+    name = 'XML'
+    aliases = ['xml']
+    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
+                 '*.wsdl', '*.wsf']
+    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
+                 'application/rss+xml', 'application/atom+xml']
+
+    tokens = {
+        'root': [
+            # These two classes partition character data: runs containing no
+            # whitespace are Text, runs of pure whitespace ([^<&\S]) are
+            # Whitespace.
+            (r'[^<&\s]+', Text),
+            (r'[^<&\S]+', Whitespace),
+            (r'&\S*?;', Name.Entity),
+            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
+            (r'<!--.*?-->', Comment.Multiline),
+            (r'<\?.*?\?>', Comment.Preproc),  # processing instruction
+            ('<![^>]*>', Comment.Preproc),    # doctype etc.
+            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
+            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
+        ],
+        'tag': [
+            (r'\s+', Whitespace),
+            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
+            (r'/?\s*>', Name.Tag, '#pop'),
+        ],
+        'attr': [
+            (r'\s+', Whitespace),
+            ('".*?"', String, '#pop'),
+            ("'.*?'", String, '#pop'),
+            (r'[^\s>]+', String, '#pop'),  # unquoted attribute value
+        ],
+    }
+
+    def analyse_text(text):
+        if looks_like_xml(text):
+            return 0.45  # less than HTML
+
+
+class XsltLexer(XmlLexer):
+    """
+    A lexer for XSLT.
+
+    Tokenizes as XML, then re-tags known ``xsl:`` element names as keywords
+    in :meth:`get_tokens_unprocessed`.
+
+    .. versionadded:: 0.10
+    """
+
+    name = 'XSLT'
+    aliases = ['xslt']
+    filenames = ['*.xsl', '*.xslt', '*.xpl']  # xpl is XProc
+    mimetypes = ['application/xsl+xml', 'application/xslt+xml']
+
+    # xsl: element local names promoted from Name.Tag to Keyword
+    EXTRA_KEYWORDS = {
+        'apply-imports', 'apply-templates', 'attribute',
+        'attribute-set', 'call-template', 'choose', 'comment',
+        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
+        'for-each', 'if', 'import', 'include', 'key', 'message',
+        'namespace-alias', 'number', 'otherwise', 'output', 'param',
+        'preserve-space', 'processing-instruction', 'sort',
+        'strip-space', 'stylesheet', 'template', 'text', 'transform',
+        'value-of', 'variable', 'when', 'with-param'
+    }
+
+    def get_tokens_unprocessed(self, text):
+        """Yield XmlLexer's tokens, upgrading recognized xsl: tags to Keyword."""
+        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
+            m = re.match('</?xsl:([^>]*)/?>?', value)
+
+            if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
+                yield index, Keyword, value
+            else:
+                yield index, token, value
+
+    def analyse_text(text):
+        if looks_like_xml(text) and '<xsl' in text:
+            return 0.8
+
+
+class HamlLexer(ExtendedRegexLexer):
+    """
+    For Haml markup.
+
+    Indentation drives the grammar: 'root' hands each line's leading
+    whitespace to the ``_indentation`` callback (from the CSS lexer module),
+    which dispatches into 'content'.  Embedded Ruby is delegated to
+    :class:`RubyLexer`.
+
+    .. versionadded:: 1.3
+    """
+
+    name = 'Haml'
+    aliases = ['haml']
+    filenames = ['*.haml']
+    mimetypes = ['text/x-haml']
+
+    flags = re.IGNORECASE
+    # Haml can include " |\n" anywhere,
+    # which is ignored and used to wrap long lines.
+    # To accommodate this, use this custom faux dot instead.
+    _dot = r'(?: \|\n(?=.* \|)|.)'
+
+    # In certain places, a comma at the end of the line
+    # allows line wrapping as well.
+    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
+    tokens = {
+        'root': [
+            (r'[ \t]*\n', Text),
+            (r'[ \t]*', _indentation),  # callback: tracks indent, enters 'content'
+        ],
+
+        # implicit div shorthands: .class and #id both open a tag
+        'css': [
+            (r'\.[\w:-]+', Name.Class, 'tag'),
+            (r'\#[\w:-]+', Name.Function, 'tag'),
+        ],
+
+        'eval-or-plain': [
+            (r'[&!]?==', Punctuation, 'plain'),
+            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
+             bygroups(Punctuation, using(RubyLexer)),
+             'root'),
+            default('plain'),
+        ],
+
+        'content': [
+            include('css'),
+            (r'%[\w:-]+', Name.Tag, 'tag'),
+            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
+            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
+             bygroups(Comment, Comment.Special, Comment),
+             '#pop'),
+            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
+             '#pop'),
+            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
+                                                  'haml-comment-block'), '#pop'),
+            (r'(-)(' + _comma_dot + r'*\n)',
+             bygroups(Punctuation, using(RubyLexer)),
+             '#pop'),
+            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
+             '#pop'),
+            include('eval-or-plain'),
+        ],
+
+        'tag': [
+            include('css'),
+            (r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),  # Ruby attr hash
+            (r'\[' + _dot + r'*?\]', using(RubyLexer)),        # object reference
+            (r'\(', Text, 'html-attributes'),
+            (r'/[ \t]*\n', Punctuation, '#pop:2'),  # self-closing tag
+            (r'[<>]{1,2}(?=[ \t=])', Punctuation),  # whitespace removal markers
+            include('eval-or-plain'),
+        ],
+
+        'plain': [
+            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
+            # #{...} Ruby interpolation
+            (r'(#\{)(' + _dot + r'*?)(\})',
+             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
+            (r'\n', Text, 'root'),
+        ],
+
+        'html-attributes': [
+            (r'\s+', Text),
+            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
+            (r'[\w:-]+', Name.Attribute),
+            (r'\)', Text, '#pop'),
+        ],
+
+        'html-attribute-value': [
+            (r'[ \t]+', Text),
+            (r'\w+', Name.Variable, '#pop'),
+            (r'@\w+', Name.Variable.Instance, '#pop'),
+            (r'\$\w+', Name.Variable.Global, '#pop'),
+            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
+            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
+        ],
+
+        'html-comment-block': [
+            (_dot + '+', Comment),
+            (r'\n', Text, 'root'),
+        ],
+
+        'haml-comment-block': [
+            (_dot + '+', Comment.Preproc),
+            (r'\n', Text, 'root'),
+        ],
+
+        'filter-block': [
+            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
+            (r'(#\{)(' + _dot + r'*?)(\})',
+             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
+            (r'\n', Text, 'root'),
+        ],
+    }
+
+
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.

    Line structure mirrors HamlLexer above, but embedded code fragments
    (``=``/``~``/``-`` lines, ``#{...}`` interpolation, ``{...}``/``[...]``
    attribute blocks) are delegated to ScalaLexer instead of RubyLexer.

    .. versionadded:: 1.4
    """

    name = 'Scaml'
    aliases = ['scaml']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']

    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines. Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'

    tokens = {
        # Entry state for each physical line: blank lines pass through,
        # otherwise the leading whitespace is handed to the module-level
        # _indentation callback (defined earlier in this file), which is
        # presumed to track nesting and route into 'content' -- it is an
        # ExtendedRegexLexer callback, so it may rewrite lexer state.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        # Haml-style implicit-div shorthands: ".cls" and "#id" open a tag.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        # Either an evaluated line (=, ~, &=, !=, ...) lexed as Scala,
        # or plain text.  '==' escapes evaluation, hence the first rule.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            default('plain'),
        ],

        # Body of a (possibly indented) line; rule order matters since
        # several prefixes share leading characters (e.g. '-#' vs '-').
        'content': [
            include('css'),
            (r'%[\w:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # Conditional HTML comment: /[...] with trailing text.
            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            # _starts_block (module-level helper) begins an indented block
            # continued in the named state on following, deeper lines.
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # '-@ import ...' directive; code portion lexed as Scala.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            # Silent Scala code line.
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # Filter, e.g. ':javascript'; body handled by 'filter-block'.
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        # After a tag name: attribute hashes, inline modifiers, then the
        # tag's content.
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + r'*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            # Whitespace-removal markers < and > (and combinations).
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # Literal text, except that #{...} interpolates Scala; the first
        # rule's escape handling lets \#{ pass through as text.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        # HTML-style (...) attribute list.
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        # Single attribute value: bare word, @instance, $global, or a
        # quoted string; each pops back to the attribute list.
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
        ],

        # Indented continuation of a '/' HTML comment.
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        # Indented continuation of a '-#' silent comment.
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        # Filter body; same interpolation handling as 'plain'.
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
+
+
class PugLexer(ExtendedRegexLexer):
    """
    For Pug markup.
    Pug is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html

    .. versionadded:: 1.4
    """
    # NOTE(review): like ScamlLexer, embedded code is lexed with
    # ScalaLexer.  The grammar differs from Scaml only in 'content':
    # tags are bare words (no '%' prefix) and '|' introduces plain text.

    name = 'Pug'
    aliases = ['pug', 'jade']
    filenames = ['*.pug', '*.jade']
    mimetypes = ['text/x-pug', 'text/x-jade']

    flags = re.IGNORECASE
    _dot = r'.'

    tokens = {
        # Per-line entry: blank lines pass, otherwise leading whitespace
        # goes to the module-level _indentation callback, which is
        # presumed to track nesting and route into 'content'.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        # Implicit-div shorthands ".cls" / "#id" open a tag.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        # Evaluated line (code lexed as Scala) or plain text; '=='
        # escapes evaluation.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            default('plain'),
        ],

        # Line body; ordered so longer prefixes ('-#', '-@') win over '-'.
        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # Conditional HTML comment /[...].
            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            # _starts_block (module-level helper) continues the comment on
            # following, deeper-indented lines in the named state.
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # '-@ import ...' directive; code portion lexed as Scala.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            # Silent code line.
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # Filter such as ':javascript'.
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            # Pug-specific: bare word is a tag, '|' starts literal text.
            (r'[\w:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],

        # After a tag name: attribute blocks, modifiers, then content.
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + r'*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            # Whitespace-removal markers < and >.
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # Literal text with #{...} Scala interpolation; \#{ is escaped.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        # HTML-style (...) attribute list.
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        # One attribute value; every alternative pops back to the list.
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
        ],

        # Indented continuation of a '/' HTML comment.
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        # Indented continuation of a '-#' silent comment.
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        # Filter body; same interpolation handling as 'plain'.
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }


# Backwards-compatible alias: the language was renamed Jade -> Pug.
JadeLexer = PugLexer # compat
diff --git a/pygments/lexers/idl.py b/pygments/lexers/idl.py
new file mode 100644
index 0000000..d24f8cd
--- /dev/null
+++ b/pygments/lexers/idl.py
@@ -0,0 +1,285 @@
+"""
+ pygments.lexers.idl
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for IDL.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
+ String, Whitespace
+
+__all__ = ['IDLLexer']
+
+
class IDLLexer(RegexLexer):
    """
    Pygments Lexer for IDL (Interactive Data Language).

    A single flat 'root' state: comments, keyword/builtin word lists,
    operators, strings, and the IDL numeric-literal zoo.

    .. versionadded:: 1.6
    """
    name = 'IDL'
    url = 'https://www.l3harrisgeospatial.com/Software-Technology/IDL'
    aliases = ['idl']
    filenames = ['*.pro']
    mimetypes = ['text/idl']

    # IDL is case-insensitive; MULTILINE lets '^' in the comment rule
    # anchor at the start of each line.
    flags = re.IGNORECASE | re.MULTILINE

    _RESERVED = (
        'and', 'begin', 'break', 'case', 'common', 'compile_opt',
        'continue', 'do', 'else', 'end', 'endcase', 'endelse',
        'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
        'endwhile', 'eq', 'for', 'foreach', 'forward_function',
        'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
        'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
        'repeat', 'switch', 'then', 'until', 'while', 'xor')
    """Reserved words from: http://www.exelisvis.com/docs/reswords.html"""

    _BUILTIN_LIB = (
        'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
        'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
        'arg_present', 'array_equal', 'array_indices', 'arrow',
        'ascii_template', 'asin', 'assoc', 'atan', 'axis',
        'a_correlate', 'bandpass_filter', 'bandreject_filter',
        'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
        'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
        'binomial', 'bin_date', 'bit_ffs', 'bit_population',
        'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
        'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
        'bytscl', 'caldat', 'calendar', 'call_external',
        'call_function', 'call_method', 'call_procedure', 'canny',
        'catch', 'cd', r'cdf_\w*', 'ceil', 'chebyshev',
        'check_math',
        'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
        'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
        'cmyk_convert', 'colorbar', 'colorize_sample',
        'colormap_applicable', 'colormap_gradient',
        'colormap_rotation', 'colortable', 'color_convert',
        'color_exchange', 'color_quan', 'color_range_map', 'comfit',
        'command_line_args', 'complex', 'complexarr', 'complexround',
        'compute_mesh_normals', 'cond', 'congrid', 'conj',
        'constrained_min', 'contour', 'convert_coord', 'convol',
        'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
        'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
        'create_view', 'crossp', 'crvlength', 'cti_test',
        'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
        'cw_animate', 'cw_animate_getp', 'cw_animate_load',
        'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
        'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
        'cw_form', 'cw_fslider', 'cw_light_editor',
        'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
        'cw_palette_editor', 'cw_palette_editor_get',
        'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
        'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
        'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
        'define_msgblk', 'define_msgblk_from_file', 'defroi',
        'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
        'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
        'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
        'dialog_printersetup', 'dialog_printjob',
        'dialog_read_image', 'dialog_write_image', 'digital_filter',
        'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
        'dlm_load', 'dlm_register', 'doc_library', 'double',
        'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
        'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
        'eof', r'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
        'erode', 'errorplot', 'errplot', 'estimator_filter',
        'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
        'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
        'file_basename', 'file_chmod', 'file_copy', 'file_delete',
        'file_dirname', 'file_expand_path', 'file_info',
        'file_lines', 'file_link', 'file_mkdir', 'file_move',
        'file_poll_input', 'file_readlink', 'file_same',
        'file_search', 'file_test', 'file_which', 'findgen',
        'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
        'fltarr', 'flush', 'format_axis_values', 'free_lun',
        'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
        'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
        'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
        'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
        'getwindows', 'get_drive_list', 'get_dxf_objects',
        'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
        'greg2jul', r'grib_\w*', 'grid3', 'griddata',
        'grid_input', 'grid_tps', 'gs_iter',
        r'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
        'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
        'hanning', 'hash', r'hdf_\w*', 'heap_free',
        'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
        'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
        'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
        'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
        'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
        'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
        'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
        'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
        'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
        'image_cont', 'image_statistics', 'imaginary', 'imap',
        'indgen', 'intarr', 'interpol', 'interpolate',
        'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
        'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
        'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
        'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
        'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
        'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
        'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
        'json_serialize', 'jul2greg', 'julday', 'keyword_set',
        'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
        'label_region', 'ladfit', 'laguerre', 'laplacian',
        'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
        'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
        'la_gm_linear_model', 'la_hqr', 'la_invert',
        'la_least_squares', 'la_least_square_equality',
        'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
        'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
        'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
        'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
        'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
        'lngamma', 'lnp_test', 'loadct', 'locale_get',
        'logical_and', 'logical_or', 'logical_true', 'lon64arr',
        'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
        'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
        'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
        'map_continents', 'map_grid', 'map_image', 'map_patch',
        'map_proj_forward', 'map_proj_image', 'map_proj_info',
        'map_proj_init', 'map_proj_inverse', 'map_set',
        'matrix_multiply', 'matrix_power', 'max', 'md_test',
        'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
        'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
        'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
        'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
        'message', 'min', 'min_curve_surf', 'mk_html_help',
        'modifyct', 'moment', 'morph_close', 'morph_distance',
        'morph_gradient', 'morph_hitormiss', 'morph_open',
        'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
        r'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
        'noise_scatter', 'noise_slur', 'norm', 'n_elements',
        'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
        'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
        'online_help', 'on_error', 'open', 'oplot', 'oploterr',
        'parse_url', 'particle_trace', 'path_cache', 'path_sep',
        'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
        'plot_field', 'pnt_line', 'point_lun', 'polarplot',
        'polar_contour', 'polar_surface', 'poly', 'polyfill',
        'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
        'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
        'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
        'print', 'printd', 'product', 'profile', 'profiler',
        'profiles', 'project_vol', 'psafm', 'pseudo',
        'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
        'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
        'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
        'query_csv', 'query_dicom', 'query_gif', 'query_image',
        'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
        'query_png', 'query_ppm', 'query_srf', 'query_tiff',
        'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
        'rdpix', 'read', 'reads', 'readu', 'read_ascii',
        'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
        'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
        'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
        'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
        'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
        'read_xwd', 'real_part', 'rebin', 'recall_commands',
        'recon3', 'reduce_colors', 'reform', 'region_grow',
        'register_cursor', 'regress', 'replicate',
        'replicate_inplace', 'resolve_all', 'resolve_routine',
        'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
        'rot', 'rotate', 'round', 'routine_filepath',
        'routine_info', 'rs_test', 'r_correlate', 'r_test',
        'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
        'scope_traceback', 'scope_varfetch', 'scope_varname',
        'search2d', 'search3d', 'sem_create', 'sem_delete',
        'sem_lock', 'sem_release', 'setenv', 'set_plot',
        'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
        'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
        'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
        'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
        'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
        'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
        'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
        'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
        'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
        'streamline', 'stregex', 'stretch', 'string', 'strjoin',
        'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
        'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
        'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
        'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
        'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
        'tanh', 'tek_color', 'temporary', 'tetra_clip',
        'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
        'timegen', 'time_test2', 'tm_test', 'total', 'trace',
        'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
        'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
        'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
        'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
        'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
        'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
        'value_locate', 'variance', 'vector', 'vector_field', 'vel',
        'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
        'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
        'where', 'widget_base', 'widget_button', 'widget_combobox',
        'widget_control', 'widget_displaycontextmen', 'widget_draw',
        'widget_droplist', 'widget_event', 'widget_info',
        'widget_label', 'widget_list', 'widget_propertysheet',
        'widget_slider', 'widget_tab', 'widget_table',
        'widget_text', 'widget_tree', 'widget_tree_move',
        'widget_window', 'wiener_filter', 'window', 'writeu',
        'write_bmp', 'write_csv', 'write_gif', 'write_image',
        'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
        'write_png', 'write_ppm', 'write_spr', 'write_srf',
        'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
        'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
        'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
        'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
        'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
        'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
        'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
        'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
        'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
        'xmtool', 'xobjview', 'xobjview_rotate',
        'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
        'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
        'xvolume', 'xvolume_rotate', 'xvolume_write_image',
        'xyouts', 'zoom', 'zoom_24')
    """Functions from: http://www.exelisvis.com/docs/routines-1.html"""

    tokens = {
        'root': [
            # ';' starts a comment running to end of line (MULTILINE '^').
            (r'(^\s*)(;.*?)(\n)', bygroups(Whitespace, Comment.Single,
                                           Whitespace)),
            (words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin),
            # Compound and plain assignment / relational-assign operators.
            (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
            # BUG FIX: the previous pattern contained the alternative
            # '\|\|\?', which only matched the literal three-character
            # sequence '||?'.  IDL's logical-or '||' and ternary '?' are
            # separate operators and never matched, falling through to
            # the catch-all '.' rule as plain Text.  Split into '\|\||\?'.
            (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\||\?|:', Operator),
            (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
            (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
            (r'"[^\"]*"', String.Double),
            (r"'[^\']*'", String.Single),
            # Floating point with optional D/E exponent marker.
            (r'\b[+\-]?([0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(D|E)?([+\-]?[0-9]+)?\b',
             Number.Float),
            # NOTE(review): the leading \b requires a word character just
            # before the opening quote, so these quoted hex/octal literal
            # forms may not match after whitespace -- confirm against real
            # IDL sources before tightening.
            (r'\b\'[+\-]?[0-9A-F]+\'X(U?(S?|L{1,2})|B)\b', Number.Hex),
            (r'\b\'[+\-]?[0-7]+\'O(U?(S?|L{1,2})|B)\b', Number.Oct),
            # Integer literals with IDL type suffixes (U/S/L/LL/B).
            (r'\b[+\-]?[0-9]+U?L{1,2}\b', Number.Integer.Long),
            (r'\b[+\-]?[0-9]+U?S?\b', Number.Integer),
            (r'\b[+\-]?[0-9]+B\b', Number),
            (r'[ \t]+', Whitespace),
            (r'\n', Whitespace),
            # Catch-all for anything not matched above.
            (r'.', Text),
        ]
    }

    def analyse_text(text):
        """endelse seems to be unique to IDL, endswitch is rare at least."""
        result = 0

        if 'endelse' in text:
            result += 0.2
        if 'endswitch' in text:
            result += 0.01

        return result
diff --git a/pygments/lexers/igor.py b/pygments/lexers/igor.py
new file mode 100644
index 0000000..28b6b35
--- /dev/null
+++ b/pygments/lexers/igor.py
@@ -0,0 +1,420 @@
+"""
+ pygments.lexers.igor
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Igor Pro.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Keyword, Name, String, Whitespace
+
+__all__ = ['IgorLexer']
+
+
class IgorLexer(RegexLexer):
    """
    Pygments Lexer for Igor Pro procedure files (.ipf).
    See http://www.wavemetrics.com/ and http://www.igorexchange.com/.

    .. versionadded:: 2.0
    """

    name = 'Igor'
    aliases = ['igor', 'igorpro']
    filenames = ['*.ipf']
    mimetypes = ['text/ipf']

    # Igor Pro identifiers are case-insensitive; MULTILINE so '^' and '$'
    # anchor at line boundaries (used by the directive and fast-path rules).
    flags = re.IGNORECASE | re.MULTILINE

    # Flow-control statements and their matching end markers.
    flowControl = (
        'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
        'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
        'break', 'continue', 'return', 'AbortOnRTE', 'AbortOnValue'
    )
    # Variable, wave and structure-field type names.
    types = (
        'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
        'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'float', 'double'
    )
    # Declaration/definition keywords (functions, macros, menus, ...).
    keywords = (
        'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc',
        'Picture', 'Prompt', 'DoPrompt', 'macro', 'window', 'function', 'end',
        'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu'
    )
    # Built-in operations (command-style statements), including XOP-provided
    # ones (DAQmx, ITC, HDF5, GIS, MFR, VDT, VISA, ...).
    operations = (
        'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame',
        'AddWavesToBoxPlot', 'AddWavesToViolinPlot', 'AdoptFiles', 'APMath', 'Append',
        'AppendBoxPlot', 'AppendImage', 'AppendLayoutObject', 'AppendMatrixContour',
        'AppendText', 'AppendToGizmo', 'AppendToGraph', 'AppendToLayout', 'AppendToTable',
        'AppendViolinPlot', 'AppendXYZContour', 'AutoPositionWindow',
        'AxonTelegraphFindServers', 'BackgroundInfo', 'Beep', 'BoundingBall', 'BoxSmooth',
        'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed',
        'ChooseColor', 'Close', 'CloseHelp', 'CloseMovie', 'CloseProc', 'ColorScale',
        'ColorTab2Wave', 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate',
        'ConvertGlobalStringTextEncoding', 'ConvexHull', 'Convolve', 'CopyDimLabels',
        'CopyFile', 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut',
        'CreateBrowser', 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground',
        'Cursor', 'CurveFit', 'CustomControl', 'CWT', 'DAQmx_AI_SetupReader',
        'DAQmx_AO_SetOutputs', 'DAQmx_CTR_CountEdges', 'DAQmx_CTR_OutputPulse',
        'DAQmx_CTR_Period', 'DAQmx_CTR_PulseWidth', 'DAQmx_DIO_Config',
        'DAQmx_DIO_WriteNewData', 'DAQmx_Scan', 'DAQmx_WaveformGen', 'Debugger',
        'DebuggerOptions', 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont',
        'DefaultTextEncoding', 'DefineGuide', 'DelayUpdate', 'DeleteAnnotations',
        'DeleteFile', 'DeleteFolder', 'DeletePoints', 'Differentiate', 'dir', 'Display',
        'DisplayHelpTopic', 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate',
        'DoWindow', 'DoXOPIdle', 'DPSS', 'DrawAction', 'DrawArc', 'DrawBezier',
        'DrawLine', 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect',
        'DrawText', 'DrawUserShape', 'DSPDetrend', 'DSPPeriodogram', 'Duplicate',
        'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit', 'ErrorBars',
        'EstimatePeakSizes', 'Execute', 'ExecuteScriptText', 'ExperimentInfo',
        'ExperimentModified', 'ExportGizmo', 'Extract', 'FastGaussTransform', 'FastOp',
        'FBinRead', 'FBinWrite', 'FFT', 'FGetPos', 'FIFOStatus', 'FIFO2Wave', 'FilterFIR',
        'FilterIIR', 'FindAPeak', 'FindContour', 'FindDuplicates', 'FindLevel',
        'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence',
        'FindValue', 'FMaxFlat', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos',
        'FStatus', 'FTPCreateDirectory', 'FTPDelete', 'FTPDownload', 'FTPUpload',
        'FuncFit', 'FuncFitMD', 'GBLoadWave', 'GetAxis', 'GetCamera', 'GetFileFolderInfo',
        'GetGizmo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetMouse', 'GetSelection',
        'GetWindow', 'GISCreateVectorLayer', 'GISGetRasterInfo',
        'GISGetRegisteredFileInfo', 'GISGetVectorLayerInfo', 'GISLoadRasterData',
        'GISLoadVectorData', 'GISRasterizeVectorData', 'GISRegisterFile',
        'GISTransformCoords', 'GISUnRegisterFile', 'GISWriteFieldData',
        'GISWriteGeometryData', 'GISWriteRaster', 'GPIBReadBinaryWave2',
        'GPIBReadBinary2', 'GPIBReadWave2', 'GPIBRead2', 'GPIBWriteBinaryWave2',
        'GPIBWriteBinary2', 'GPIBWriteWave2', 'GPIBWrite2', 'GPIB2', 'GraphNormal',
        'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', 'Hanning', 'HDFInfo',
        'HDFReadImage', 'HDFReadSDS', 'HDFReadVset', 'HDF5CloseFile', 'HDF5CloseGroup',
        'HDF5ConvertColors', 'HDF5CreateFile', 'HDF5CreateGroup', 'HDF5CreateLink',
        'HDF5Dump', 'HDF5DumpErrors', 'HDF5DumpState', 'HDF5FlushFile',
        'HDF5ListAttributes', 'HDF5ListGroup', 'HDF5LoadData', 'HDF5LoadGroup',
        'HDF5LoadImage', 'HDF5OpenFile', 'HDF5OpenGroup', 'HDF5SaveData', 'HDF5SaveGroup',
        'HDF5SaveImage', 'HDF5TestOperation', 'HDF5UnlinkObject', 'HideIgorMenus',
        'HideInfo', 'HideProcedures', 'HideTools', 'HilbertTransform', 'Histogram', 'ICA',
        'IFFT', 'ImageAnalyzeParticles', 'ImageBlend', 'ImageBoundaryToMask',
        'ImageComposite', 'ImageEdgeDetection', 'ImageFileInfo', 'ImageFilter',
        'ImageFocus', 'ImageFromXYZ', 'ImageGenerateROIMask', 'ImageGLCM',
        'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile',
        'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground',
        'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSkeleton3d',
        'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform',
        'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate',
        'IntegrateODE', 'Integrate2D', 'Interpolate2', 'Interpolate3D', 'Interp3DPath',
        'ITCCloseAll2', 'ITCCloseDevice2', 'ITCConfigAllChannels2',
        'ITCConfigChannelReset2', 'ITCConfigChannelUpload2', 'ITCConfigChannel2',
        'ITCFIFOAvailableAll2', 'ITCFIFOAvailable2', 'ITCGetAllChannelsConfig2',
        'ITCGetChannelConfig2', 'ITCGetCurrentDevice2', 'ITCGetDeviceInfo2',
        'ITCGetDevices2', 'ITCGetErrorString2', 'ITCGetSerialNumber2', 'ITCGetState2',
        'ITCGetVersions2', 'ITCInitialize2', 'ITCOpenDevice2', 'ITCReadADC2',
        'ITCReadDigital2', 'ITCReadTimer2', 'ITCSelectDevice2', 'ITCSetDAC2',
        'ITCSetGlobals2', 'ITCSetModes2', 'ITCSetState2', 'ITCStartAcq2', 'ITCStopAcq2',
        'ITCUpdateFIFOPositionAll2', 'ITCUpdateFIFOPosition2', 'ITCWriteDigital2',
        'JCAMPLoadWave', 'JointHistogram', 'KillBackground', 'KillControl',
        'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs',
        'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label',
        'Layout', 'LayoutPageAction', 'LayoutSlideShow', 'Legend',
        'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences',
        'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex',
        'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV',
        'MatrixFilter', 'MatrixGaussJ', 'MatrixGLM', 'MatrixInverse', 'MatrixLinearSolve',
        'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixLUDTD',
        'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub',
        'MatrixSVD', 'MatrixTranspose', 'MCC_FindServers', 'MeasureStyledText',
        'MFR_CheckForNewBricklets',
        'MFR_CloseResultFile', 'MFR_CreateOverviewTable', 'MFR_GetBrickletCount',
        'MFR_GetBrickletData', 'MFR_GetBrickletDeployData', 'MFR_GetBrickletMetaData',
        'MFR_GetBrickletRawData', 'MFR_GetReportTemplate', 'MFR_GetResultFileMetaData',
        'MFR_GetResultFileName', 'MFR_GetVernissageVersion', 'MFR_GetVersion',
        'MFR_GetXOPErrorMessage', 'MFR_OpenResultFile',
        'MLLoadWave', 'Modify', 'ModifyBoxPlot', 'ModifyBrowser', 'ModifyCamera',
        'ModifyContour', 'ModifyControl', 'ModifyControlList', 'ModifyFreeAxis',
        'ModifyGizmo', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', 'ModifyPanel',
        'ModifyTable', 'ModifyViolinPlot', 'ModifyWaterfall', 'MoveDataFolder',
        'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
        'MoveWave', 'MoveWindow', 'MultiTaperPSD', 'MultiThreadingControl',
        'NC_CloseFile', 'NC_DumpErrors', 'NC_Inquire', 'NC_ListAttributes',
        'NC_ListObjects', 'NC_LoadData', 'NC_OpenFile', 'NeuralNetworkRun',
        'NeuralNetworkTrain', 'NewCamera', 'NewDataFolder', 'NewFIFO', 'NewFIFOChan',
        'NewFreeAxis', 'NewGizmo', 'NewImage', 'NewLayout', 'NewMovie', 'NewNotebook',
        'NewPanel', 'NewPath', 'NewWaterfall', 'NILoadWave', 'NI4882', 'Note', 'Notebook',
        'NotebookAction', 'Open', 'OpenHelp', 'OpenNotebook', 'Optimize',
        'ParseOperationTemplate', 'PathInfo', 'PauseForUser', 'PauseUpdate', 'PCA',
        'PlayMovie', 'PlayMovieAction', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
        'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', 'PrintLayout',
        'PrintNotebook', 'PrintSettings', 'PrintTable', 'Project', 'PulseStats',
        'PutScrapText', 'pwd', 'Quit', 'RatioFromNumber', 'Redimension', 'Remez',
        'Remove', 'RemoveContour', 'RemoveFromGizmo', 'RemoveFromGraph',
        'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', 'RemoveLayoutObjects',
        'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath', 'RenamePICT',
        'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText', 'ReplaceWave',
        'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
        'SaveExperiment', 'SaveGizmoCopy', 'SaveGraphCopy', 'SaveNotebook',
        'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow',
        'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel',
        'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIdlePeriod',
        'SetIgorHook', 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee',
        'SetProcessSleep', 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock',
        'SetWaveTextEncoding', 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools',
        'Silent', 'Sleep', 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SortColumns',
        'SoundInRecord', 'SoundInSet', 'SoundInStartChart', 'SoundInStatus',
        'SoundInStopChart', 'SoundLoadWave', 'SoundSaveWave', 'SphericalInterpolate',
        'SphericalTriangulate', 'SplitString', 'SplitWave', 'sprintf', 'SQLHighLevelOp',
        'sscanf', 'Stack', 'StackWindows', 'StatsAngularDistanceTest', 'StatsANOVA1Test',
        'StatsANOVA2NRTest', 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
        'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments',
        'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable',
        'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
        'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKDE', 'StatsKendallTauTest',
        'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
        'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest',
        'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest',
        'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsShapiroWilkTest',
        'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest',
        'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest',
        'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest',
        'STFT', 'String', 'StructFill', 'StructGet', 'StructPut', 'SumDimension',
        'SumSeries', 'TabControl', 'Tag', 'TDMLoadData', 'TDMSaveData', 'TextBox',
        'ThreadGroupPutDF', 'ThreadStart', 'TickWavesFromAxis', 'Tile', 'TileWindows',
        'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'URLRequest',
        'ValDisplay', 'Variable', 'VDTClosePort2', 'VDTGetPortList2', 'VDTGetStatus2',
        'VDTOpenPort2', 'VDTOperationsPort2', 'VDTReadBinaryWave2', 'VDTReadBinary2',
        'VDTReadHexWave2', 'VDTReadHex2', 'VDTReadWave2', 'VDTRead2', 'VDTTerminalPort2',
        'VDTWriteBinaryWave2', 'VDTWriteBinary2', 'VDTWriteHexWave2', 'VDTWriteHex2',
        'VDTWriteWave2', 'VDTWrite2', 'VDT2', 'VISAControl', 'VISARead', 'VISAReadBinary',
        'VISAReadBinaryWave', 'VISAReadWave', 'VISAWrite', 'VISAWriteBinary',
        'VISAWriteBinaryWave', 'VISAWriteWave', 'WaveMeanStdv', 'WaveStats',
        'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', 'XLLoadWave'
    )
    # Built-in functions (expression-style calls), including XOP-provided
    # ones (fDAQmx, MCC, SQL, TDM, tango, vi*/VISA, zeromq, ...).
    functions = (
        'abs', 'acos', 'acosh', 'AddListItem', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD',
        'alog', 'AnnotationInfo', 'AnnotationList', 'area', 'areaXY', 'asin', 'asinh',
        'atan', 'atanh', 'atan2', 'AxisInfo', 'AxisList', 'AxisValFromPixel',
        'AxonTelegraphAGetDataNum', 'AxonTelegraphAGetDataString',
        'AxonTelegraphAGetDataStruct', 'AxonTelegraphGetDataNum',
        'AxonTelegraphGetDataString', 'AxonTelegraphGetDataStruct',
        'AxonTelegraphGetTimeoutMs', 'AxonTelegraphSetTimeoutMs', 'Base64Decode',
        'Base64Encode', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'beta', 'betai',
        'BinarySearch', 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise',
        'cabs', 'CaptureHistory', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num',
        'chebyshev', 'chebyshevU', 'CheckName', 'ChildWindowList', 'CleanupName', 'cmplx',
        'cmpstr', 'conj', 'ContourInfo', 'ContourNameList', 'ContourNameToWaveRef',
        'ContourZ', 'ControlNameList', 'ConvertTextEncoding', 'cos', 'cosh',
        'cosIntegral', 'cot', 'coth', 'CountObjects', 'CountObjectsDFR', 'cpowi',
        'CreationDate', 'csc', 'csch', 'CsrInfo', 'CsrWave', 'CsrWaveRef', 'CsrXWave',
        'CsrXWaveRef', 'CTabList', 'DataFolderDir', 'DataFolderExists',
        'DataFolderRefsEqual', 'DataFolderRefStatus', 'date', 'datetime', 'DateToJulian',
        'date2secs', 'Dawson', 'defined', 'deltax', 'digamma', 'dilogarithm', 'DimDelta',
        'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', 'erfc', 'erfcw',
        'exists', 'exp', 'expInt', 'expIntegralE1', 'expNoise', 'factorial', 'Faddeeva',
        'fakedata', 'faverage', 'faverageXY', 'fDAQmx_AI_GetReader',
        'fDAQmx_AO_UpdateOutputs', 'fDAQmx_ConnectTerminals', 'fDAQmx_CTR_Finished',
        'fDAQmx_CTR_IsFinished', 'fDAQmx_CTR_IsPulseFinished', 'fDAQmx_CTR_ReadCounter',
        'fDAQmx_CTR_ReadWithOptions', 'fDAQmx_CTR_SetPulseFrequency', 'fDAQmx_CTR_Start',
        'fDAQmx_DeviceNames', 'fDAQmx_DIO_Finished', 'fDAQmx_DIO_PortWidth',
        'fDAQmx_DIO_Read', 'fDAQmx_DIO_Write', 'fDAQmx_DisconnectTerminals',
        'fDAQmx_ErrorString', 'fDAQmx_ExternalCalDate', 'fDAQmx_NumAnalogInputs',
        'fDAQmx_NumAnalogOutputs', 'fDAQmx_NumCounters', 'fDAQmx_NumDIOPorts',
        'fDAQmx_ReadChan', 'fDAQmx_ReadNamedChan', 'fDAQmx_ResetDevice',
        'fDAQmx_ScanGetAvailable', 'fDAQmx_ScanGetNextIndex', 'fDAQmx_ScanStart',
        'fDAQmx_ScanStop', 'fDAQmx_ScanWait', 'fDAQmx_ScanWaitWithTimeout',
        'fDAQmx_SelfCalDate', 'fDAQmx_SelfCalibration', 'fDAQmx_WaveformStart',
        'fDAQmx_WaveformStop', 'fDAQmx_WF_IsFinished', 'fDAQmx_WF_WaitUntilFinished',
        'fDAQmx_WriteChan', 'FetchURL', 'FindDimLabel', 'FindListItem', 'floor',
        'FontList', 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
        'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'gamma',
        'gammaEuler', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
        'Gauss1D', 'Gauss2D', 'gcd', 'GetBrowserLine', 'GetBrowserSelection',
        'GetDataFolder', 'GetDataFolderDFR', 'GetDefaultFont', 'GetDefaultFontSize',
        'GetDefaultFontStyle', 'GetDimLabel', 'GetEnvironmentVariable', 'GetErrMessage',
        'GetFormula', 'GetIndependentModuleName', 'GetIndexedObjName',
        'GetIndexedObjNameDFR', 'GetKeyState', 'GetRTErrMessage', 'GetRTError',
        'GetRTLocation', 'GetRTLocInfo', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
        'GetWavesDataFolder', 'GetWavesDataFolderDFR', 'GISGetAllFileFormats',
        'GISSRefsAreEqual', 'GizmoInfo', 'GizmoScale', 'gnoise', 'GrepList', 'GrepString',
        'GuideInfo', 'GuideNameList', 'Hash', 'hcsr', 'HDF5AttributeInfo',
        'HDF5DatasetInfo', 'HDF5LibraryInfo', 'HDF5TypeInfo', 'hermite', 'hermiteGauss',
        'HyperGNoise', 'HyperGPFQ', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'IgorInfo',
        'IgorVersion', 'imag', 'ImageInfo', 'ImageNameList', 'ImageNameToWaveRef',
        'IndependentModuleList', 'IndexedDir', 'IndexedFile', 'IndexToScale', 'Inf',
        'Integrate1D', 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC',
        'ItemsInList', 'JacobiCn', 'JacobiSn', 'JulianToDate', 'Laguerre', 'LaguerreA',
        'LaguerreGauss', 'LambertW', 'LayoutInfo', 'leftx', 'LegendreA', 'limit',
        'ListMatch', 'ListToTextWave', 'ListToWaveRefWave', 'ln', 'log', 'logNormalNoise',
        'lorentzianNoise', 'LowerStr', 'MacroList', 'magsqr', 'MandelbrotPoint',
        'MarcumQ', 'MatrixCondition', 'MatrixDet', 'MatrixDot', 'MatrixRank',
        'MatrixTrace', 'max', 'MCC_AutoBridgeBal', 'MCC_AutoFastComp',
        'MCC_AutoPipetteOffset', 'MCC_AutoSlowComp', 'MCC_AutoWholeCellComp',
        'MCC_GetBridgeBalEnable', 'MCC_GetBridgeBalResist', 'MCC_GetFastCompCap',
        'MCC_GetFastCompTau', 'MCC_GetHolding', 'MCC_GetHoldingEnable', 'MCC_GetMode',
        'MCC_GetNeutralizationCap', 'MCC_GetNeutralizationEnable',
        'MCC_GetOscKillerEnable', 'MCC_GetPipetteOffset', 'MCC_GetPrimarySignalGain',
        'MCC_GetPrimarySignalHPF', 'MCC_GetPrimarySignalLPF', 'MCC_GetRsCompBandwidth',
        'MCC_GetRsCompCorrection', 'MCC_GetRsCompEnable', 'MCC_GetRsCompPrediction',
        'MCC_GetSecondarySignalGain', 'MCC_GetSecondarySignalLPF', 'MCC_GetSlowCompCap',
        'MCC_GetSlowCompTau', 'MCC_GetSlowCompTauX20Enable',
        'MCC_GetSlowCurrentInjEnable', 'MCC_GetSlowCurrentInjLevel',
        'MCC_GetSlowCurrentInjSetlTime', 'MCC_GetWholeCellCompCap',
        'MCC_GetWholeCellCompEnable', 'MCC_GetWholeCellCompResist',
        'MCC_SelectMultiClamp700B', 'MCC_SetBridgeBalEnable', 'MCC_SetBridgeBalResist',
        'MCC_SetFastCompCap', 'MCC_SetFastCompTau', 'MCC_SetHolding',
        'MCC_SetHoldingEnable', 'MCC_SetMode', 'MCC_SetNeutralizationCap',
        'MCC_SetNeutralizationEnable', 'MCC_SetOscKillerEnable', 'MCC_SetPipetteOffset',
        'MCC_SetPrimarySignalGain', 'MCC_SetPrimarySignalHPF', 'MCC_SetPrimarySignalLPF',
        'MCC_SetRsCompBandwidth', 'MCC_SetRsCompCorrection', 'MCC_SetRsCompEnable',
        'MCC_SetRsCompPrediction', 'MCC_SetSecondarySignalGain',
        'MCC_SetSecondarySignalLPF', 'MCC_SetSlowCompCap', 'MCC_SetSlowCompTau',
        'MCC_SetSlowCompTauX20Enable', 'MCC_SetSlowCurrentInjEnable',
        'MCC_SetSlowCurrentInjLevel', 'MCC_SetSlowCurrentInjSetlTime', 'MCC_SetTimeoutMs',
        'MCC_SetWholeCellCompCap', 'MCC_SetWholeCellCompEnable',
        'MCC_SetWholeCellCompResist', 'mean', 'median', 'min', 'mod', 'ModDate',
        'MPFXEMGPeak', 'MPFXExpConvExpPeak', 'MPFXGaussPeak', 'MPFXLorenzianPeak',
        'MPFXVoigtPeak', 'NameOfWave', 'NaN', 'NewFreeDataFolder', 'NewFreeWave', 'norm',
        'NormalizeUnicode', 'note', 'NumberByKey', 'numpnts', 'numtype',
        'NumVarOrDefault', 'num2char', 'num2istr', 'num2str', 'NVAR_Exists',
        'OperationList', 'PadString', 'PanelResolution', 'ParamIsDefault',
        'ParseFilePath', 'PathList', 'pcsr', 'Pi', 'PICTInfo', 'PICTList',
        'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'PolygonArea', 'poly2D',
        'PossiblyQuoteName', 'ProcedureText', 'p2rect', 'qcsr', 'real', 'RemoveByKey',
        'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey',
        'ReplaceString', 'ReplaceStringByKey', 'rightx', 'round', 'r2polar', 'sawtooth',
        'scaleToIndex', 'ScreenResolution', 'sec', 'sech', 'Secs2Date', 'Secs2Time',
        'SelectNumber', 'SelectString', 'SetEnvironmentVariable', 'sign', 'sin', 'sinc',
        'sinh', 'sinIntegral', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList',
        'SpecialDirPath', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY',
        'SphericalBessYD', 'SphericalHarmonics', 'SQLAllocHandle', 'SQLAllocStmt',
        'SQLBinaryWavesToTextWave', 'SQLBindCol', 'SQLBindParameter', 'SQLBrowseConnect',
        'SQLBulkOperations', 'SQLCancel', 'SQLCloseCursor', 'SQLColAttributeNum',
        'SQLColAttributeStr', 'SQLColumnPrivileges', 'SQLColumns', 'SQLConnect',
        'SQLDataSources', 'SQLDescribeCol', 'SQLDescribeParam', 'SQLDisconnect',
        'SQLDriverConnect', 'SQLDrivers', 'SQLEndTran', 'SQLError', 'SQLExecDirect',
        'SQLExecute', 'SQLFetch', 'SQLFetchScroll', 'SQLForeignKeys', 'SQLFreeConnect',
        'SQLFreeEnv', 'SQLFreeHandle', 'SQLFreeStmt', 'SQLGetConnectAttrNum',
        'SQLGetConnectAttrStr', 'SQLGetCursorName', 'SQLGetDataNum', 'SQLGetDataStr',
        'SQLGetDescFieldNum', 'SQLGetDescFieldStr', 'SQLGetDescRec', 'SQLGetDiagFieldNum',
        'SQLGetDiagFieldStr', 'SQLGetDiagRec', 'SQLGetEnvAttrNum', 'SQLGetEnvAttrStr',
        'SQLGetFunctions', 'SQLGetInfoNum', 'SQLGetInfoStr', 'SQLGetStmtAttrNum',
        'SQLGetStmtAttrStr', 'SQLGetTypeInfo', 'SQLMoreResults', 'SQLNativeSql',
        'SQLNumParams', 'SQLNumResultCols', 'SQLNumResultRowsIfKnown',
        'SQLNumRowsFetched', 'SQLParamData', 'SQLPrepare', 'SQLPrimaryKeys',
        'SQLProcedureColumns', 'SQLProcedures', 'SQLPutData', 'SQLReinitialize',
        'SQLRowCount', 'SQLSetConnectAttrNum', 'SQLSetConnectAttrStr', 'SQLSetCursorName',
        'SQLSetDescFieldNum', 'SQLSetDescFieldStr', 'SQLSetDescRec', 'SQLSetEnvAttrNum',
        'SQLSetEnvAttrStr', 'SQLSetPos', 'SQLSetStmtAttrNum', 'SQLSetStmtAttrStr',
        'SQLSpecialColumns', 'SQLStatistics', 'SQLTablePrivileges', 'SQLTables',
        'SQLTextWaveToBinaryWaves', 'SQLTextWaveTo2DBinaryWave', 'SQLUpdateBoundValues',
        'SQLXOPCheckState', 'SQL2DBinaryWaveToTextWave', 'sqrt', 'StartMSTimer',
        'StatsBetaCDF', 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
        'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF',
        'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF',
        'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF',
        'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF',
        'StatsGammaCDF', 'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF',
        'StatsGEVCDF', 'StatsGEVPDF', 'StatsHyperGCDF', 'StatsHyperGPDF',
        'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF',
        'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
        'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF',
        'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF',
        'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF',
        'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF',
        'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
        'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
        'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
        'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
        'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF',
        'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian',
        'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF',
        'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF',
        'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF',
        'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
        'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF',
        'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF',
        'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
        'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
        'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF',
        'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF',
        'StopMSTimer', 'StringByKey', 'stringCRC', 'StringFromList', 'StringList',
        'stringmatch', 'strlen', 'strsearch', 'StrVarOrDefault', 'str2num', 'StudentA',
        'StudentT', 'sum', 'SVAR_Exists', 'TableInfo', 'TagVal', 'TagWaveRef', 'tan',
        'tango_close_device', 'tango_command_inout', 'tango_compute_image_proj',
        'tango_get_dev_attr_list', 'tango_get_dev_black_box', 'tango_get_dev_cmd_list',
        'tango_get_dev_status', 'tango_get_dev_timeout', 'tango_get_error_stack',
        'tango_open_device', 'tango_ping_device', 'tango_read_attribute',
        'tango_read_attributes', 'tango_reload_dev_interface',
        'tango_resume_attr_monitor', 'tango_set_attr_monitor_period',
        'tango_set_dev_timeout', 'tango_start_attr_monitor', 'tango_stop_attr_monitor',
        'tango_suspend_attr_monitor', 'tango_write_attribute', 'tango_write_attributes',
        'tanh', 'TDMAddChannel', 'TDMAddGroup', 'TDMAppendDataValues',
        'TDMAppendDataValuesTime', 'TDMChannelPropertyExists', 'TDMCloseChannel',
        'TDMCloseFile', 'TDMCloseGroup', 'TDMCreateChannelProperty', 'TDMCreateFile',
        'TDMCreateFileProperty', 'TDMCreateGroupProperty', 'TDMFilePropertyExists',
        'TDMGetChannelPropertyNames', 'TDMGetChannelPropertyNum',
        'TDMGetChannelPropertyStr', 'TDMGetChannelPropertyTime',
        'TDMGetChannelPropertyType', 'TDMGetChannels', 'TDMGetChannelStringPropertyLen',
        'TDMGetDataType', 'TDMGetDataValues', 'TDMGetDataValuesTime',
        'TDMGetFilePropertyNames', 'TDMGetFilePropertyNum', 'TDMGetFilePropertyStr',
        'TDMGetFilePropertyTime', 'TDMGetFilePropertyType', 'TDMGetFileStringPropertyLen',
        'TDMGetGroupPropertyNames', 'TDMGetGroupPropertyNum', 'TDMGetGroupPropertyStr',
        'TDMGetGroupPropertyTime', 'TDMGetGroupPropertyType', 'TDMGetGroups',
        'TDMGetGroupStringPropertyLen', 'TDMGetLibraryErrorDescription',
        'TDMGetNumChannelProperties', 'TDMGetNumChannels', 'TDMGetNumDataValues',
        'TDMGetNumFileProperties', 'TDMGetNumGroupProperties', 'TDMGetNumGroups',
        'TDMGroupPropertyExists', 'TDMOpenFile', 'TDMOpenFileEx', 'TDMRemoveChannel',
        'TDMRemoveGroup', 'TDMReplaceDataValues', 'TDMReplaceDataValuesTime',
        'TDMSaveFile', 'TDMSetChannelPropertyNum', 'TDMSetChannelPropertyStr',
        'TDMSetChannelPropertyTime', 'TDMSetDataValues', 'TDMSetDataValuesTime',
        'TDMSetFilePropertyNum', 'TDMSetFilePropertyStr', 'TDMSetFilePropertyTime',
        'TDMSetGroupPropertyNum', 'TDMSetGroupPropertyStr', 'TDMSetGroupPropertyTime',
        'TextEncodingCode', 'TextEncodingName', 'TextFile', 'ThreadGroupCreate',
        'ThreadGroupGetDF', 'ThreadGroupGetDFR', 'ThreadGroupRelease', 'ThreadGroupWait',
        'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'time', 'TraceFromPixel',
        'TraceInfo', 'TraceNameList', 'TraceNameToWaveRef', 'TrimString', 'trunc',
        'UniqueName', 'UnPadString', 'UnsetEnvironmentVariable', 'UpperStr', 'URLDecode',
        'URLEncode', 'VariableList', 'Variance', 'vcsr', 'viAssertIntrSignal',
        'viAssertTrigger', 'viAssertUtilSignal', 'viClear', 'viClose', 'viDisableEvent',
        'viDiscardEvents', 'viEnableEvent', 'viFindNext', 'viFindRsrc', 'viGetAttribute',
        'viGetAttributeString', 'viGpibCommand', 'viGpibControlATN', 'viGpibControlREN',
        'viGpibPassControl', 'viGpibSendIFC', 'viIn8', 'viIn16', 'viIn32', 'viLock',
        'viMapAddress', 'viMapTrigger', 'viMemAlloc', 'viMemFree', 'viMoveIn8',
        'viMoveIn16', 'viMoveIn32', 'viMoveOut8', 'viMoveOut16', 'viMoveOut32', 'viOpen',
        'viOpenDefaultRM', 'viOut8', 'viOut16', 'viOut32', 'viPeek8', 'viPeek16',
        'viPeek32', 'viPoke8', 'viPoke16', 'viPoke32', 'viRead', 'viReadSTB',
        'viSetAttribute', 'viSetAttributeString', 'viStatusDesc', 'viTerminate',
        'viUnlock', 'viUnmapAddress', 'viUnmapTrigger', 'viUsbControlIn',
        'viUsbControlOut', 'viVxiCommandQuery', 'viWaitOnEvent', 'viWrite', 'VoigtFunc',
        'VoigtPeak', 'WaveCRC', 'WaveDims', 'WaveExists', 'WaveHash', 'WaveInfo',
        'WaveList', 'WaveMax', 'WaveMin', 'WaveName', 'WaveRefIndexed',
        'WaveRefIndexedDFR', 'WaveRefsEqual', 'WaveRefWaveToList', 'WaveTextEncoding',
        'WaveType', 'WaveUnits', 'WhichListItem', 'WinList', 'WinName', 'WinRecreation',
        'WinType', 'wnoise', 'xcsr', 'XWaveName', 'XWaveRefFromTrace', 'x2pnt', 'zcsr',
        'ZernikeR', 'zeromq_client_connect', 'zeromq_client_recv',
        'zeromq_client_send', 'zeromq_handler_start', 'zeromq_handler_stop',
        'zeromq_server_bind', 'zeromq_server_recv', 'zeromq_server_send', 'zeromq_set',
        'zeromq_stop', 'zeromq_test_callfunction', 'zeromq_test_serializeWave', 'zeta'
    )

    # Rule order is significant: comments and strings are consumed first so
    # their contents are never scanned for keywords; the word classes are
    # tried before the generic fallbacks at the end.
    tokens = {
        'root': [
            # '//' line comment.
            (r'//.*$', Comment.Single),
            # Double-quoted string with backslash escapes.
            (r'"([^"\\]|\\.)*"', String),
            # Flow Control.
            (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword),
            # Types.
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            # Keywords.
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
            # Built-in operations.
            (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class),
            # Built-in functions.
            (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
            # Compiler directives.
            (r'^#(include|pragma|define|undef|ifdef|ifndef|if|elif|else|endif)',
             Name.Decorator),
            (r'\s+', Whitespace),
            # Fast path: with IGNORECASE, a run of characters that cannot
            # start any rule above ('"', '/', letters), up to end of line.
            (r'[^a-z"/]+$', Text),
            # Fallback: any other single character.
            (r'.', Text),
        ],
    }
diff --git a/pygments/lexers/inferno.py b/pygments/lexers/inferno.py
new file mode 100644
index 0000000..a3bee4f
--- /dev/null
+++ b/pygments/lexers/inferno.py
@@ -0,0 +1,96 @@
+"""
+ pygments.lexers.inferno
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Inferno OS and related material.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, default
+from pygments.token import Punctuation, Comment, Operator, Keyword, \
+ Name, String, Number, Whitespace
+
+__all__ = ['LimboLexer']
+
+
class LimboLexer(RegexLexer):
    """
    Lexer for the Limbo programming language.

    TODO:
        - maybe implement better var declaration highlighting
        - some simple syntax error highlighting

    .. versionadded:: 2.0
    """
    name = 'Limbo'
    url = 'http://www.vitanuova.com/inferno/limbo.html'
    aliases = ['limbo']
    filenames = ['*.b']
    mimetypes = ['text/limbo']

    tokens = {
        'whitespace': [
            # A label of the form "name:" alone on a line.
            (r'^(\s*)([a-zA-Z_]\w*:)(\s*\n)',
             bygroups(Whitespace, Name.Label, Whitespace)),
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # '#' comments run to an unescaped end of line.
            (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\', String),  # stray backslash
        ],
        'statements': [
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
            (r'16r[0-9a-fA-F]+', Number.Hex),
            (r'8r[0-7]+', Number.Oct),
            # Optional explicit radix 2r..39r, then digits.
            (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
            (r'[()\[\],.]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            # BUG FIX: the original joined these adjacent string fragments
            # without a '|' at two seams, producing the bogus alternatives
            # 'exitfor' and 'orpick', so 'exit', 'for', 'or' and 'pick'
            # were never matched as keywords.
            (r'(alt|break|case|continue|cyclic|do|else|exit|'
             r'for|hd|if|implement|import|include|len|load|or|'
             r'pick|return|spawn|tagof|tl|to|while)\b', Keyword),
            (r'(byte|int|big|real|string|array|chan|list|adt'
             r'|fn|ref|of|module|self|type)\b', Keyword.Type),
            (r'(con|iota|nil)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            # ';' ends the statement.
            (';', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            default('statement'),
        ],
    }

    def analyse_text(text):
        # Any limbo module implements something
        if re.search(r'^implement \w+;', text, re.MULTILINE):
            return 0.7
+
+# TODO:
+# - Make lexers for:
+# - asm sources
+# - man pages
+# - mkfiles
+# - module definitions
+# - namespace definitions
+# - shell scripts
+# - maybe keyfiles and fonts
+# they all seem to be quite similar to their equivalents
+# from unix world, so there should not be a lot of problems
diff --git a/pygments/lexers/installers.py b/pygments/lexers/installers.py
new file mode 100644
index 0000000..e2013b2
--- /dev/null
+++ b/pygments/lexers/installers.py
@@ -0,0 +1,327 @@
+"""
+ pygments.lexers.installers
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for installer/packager DSLs and formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Punctuation, Generic, Number, Whitespace
+
+__all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer',
+ 'DebianControlLexer']
+
+
+class NSISLexer(RegexLexer):
+ """
+ For NSIS scripts.
+
+ .. versionadded:: 1.6
+ """
+ name = 'NSIS'
+ url = 'http://nsis.sourceforge.net/'
+ aliases = ['nsis', 'nsi', 'nsh']
+ filenames = ['*.nsi', '*.nsh']
+ mimetypes = ['text/x-nsis']
+
+ flags = re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'([;#].*)(\n)', bygroups(Comment, Whitespace)),
+ (r"'.*?'", String.Single),
+ (r'"', String.Double, 'str_double'),
+ (r'`', String.Backtick, 'str_backtick'),
+ include('macro'),
+ include('interpol'),
+ include('basic'),
+ (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo),
+ (r'/[a-z_]\w*', Name.Attribute),
+ (r'\s+', Whitespace),
+ (r'[\w.]+', Text),
+ ],
+ 'basic': [
+ (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b',
+ bygroups(Whitespace, Keyword, Whitespace, Name.Function)),
+ (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b',
+ bygroups(Keyword.Namespace, Punctuation, Name.Function)),
+ (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)),
+ (r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator),
+ (r'[|+-]', Operator),
+ (r'\\', Punctuation),
+ (r'\b(Abort|Add(?:BrandingImage|Size)|'
+ r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|'
+ r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|'
+ r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|'
+ r'ComponentText|CopyFiles|CRCCheck|'
+ r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|'
+ r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|'
+ r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|'
+ r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|'
+ r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|'
+ r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|'
+ r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|'
+ r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|'
+ r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|'
+ r'InstDirError|LabelAddress|TempFileName)|'
+ r'Goto|HideWindow|Icon|'
+ r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|'
+ r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|'
+ r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|'
+ r'IsWindow|LangString(?:UP)?|'
+ r'License(?:BkColor|Data|ForceSelection|LangString|Text)|'
+ r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|'
+ r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|'
+ r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|'
+ r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|'
+ r'Return|RMDir|SearchPath|Section(?:Divider|End|'
+ r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|'
+ r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|'
+ r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|'
+ r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|'
+ r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|'
+ r'Silent|StaticBkColor)|'
+ r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|'
+ r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|'
+ r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|'
+ r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|'
+ r'Write(?:INIStr|Reg(?:Bin|DWORD|(?:Expand)?Str)|Uninstaller)|'
+ r'XPStyle)\b', Keyword),
+ (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?'
+ r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|'
+ r'HK(CC|CR|CU|DD|LM|PD|U)|'
+ r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|'
+ r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|'
+ r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|'
+ r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|'
+ r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|'
+ r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|'
+ r'YESNO(?:CANCEL)?)|SET|SHCTX|'
+ r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|'
+ r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|'
+ r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|'
+ r'listonly|lzma|nevershow|none|normal|off|on|pop|push|'
+ r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|'
+ r'true|try|user|zlib)\b', Name.Constant),
+ ],
+ 'macro': [
+ (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|'
+ r'delfilefile|echo(?:message)?|else|endif|error|execute|'
+ r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|'
+ r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|'
+ r'warning)\b', Comment.Preproc),
+ ],
+ 'interpol': [
+ (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers
+ (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|'
+ r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|'
+ r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|'
+ r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|'
+ r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|'
+ r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})',
+ Name.Builtin),
+ (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global),
+ (r'\$[a-z_]\w*', Name.Variable),
+ ],
+ 'str_double': [
+ (r'"', String.Double, '#pop'),
+ (r'\$(\\[nrt"]|\$)', String.Escape),
+ include('interpol'),
+ (r'[^"]+', String.Double),
+ ],
+ 'str_backtick': [
+ (r'`', String.Double, '#pop'),
+ (r'\$(\\[nrt"]|\$)', String.Escape),
+ include('interpol'),
+ (r'[^`]+', String.Double),
+ ],
+ }
+
+
+class RPMSpecLexer(RegexLexer):
+ """
+ For RPM ``.spec`` files.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'RPMSpec'
+ aliases = ['spec']
+ filenames = ['*.spec']
+ mimetypes = ['text/x-rpm-spec']
+
+ _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|'
+ 'post[a-z]*|trigger[a-z]*|files)')
+
+ tokens = {
+ 'root': [
+ (r'#.*$', Comment),
+ include('basic'),
+ ],
+ 'description': [
+ (r'^(%' + _directives + ')(.*)$',
+ bygroups(Name.Decorator, Text), '#pop'),
+ (r'\s+', Whitespace),
+ (r'.', Text),
+ ],
+ 'changelog': [
+ (r'\*.*$', Generic.Subheading),
+ (r'^(%' + _directives + ')(.*)$',
+ bygroups(Name.Decorator, Text), '#pop'),
+ (r'\s+', Whitespace),
+ (r'.', Text),
+ ],
+ 'string': [
+ (r'"', String.Double, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ include('interpol'),
+ (r'.', String.Double),
+ ],
+ 'basic': [
+ include('macro'),
+ (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|'
+ r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|'
+ r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|'
+ r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$',
+ bygroups(Generic.Heading, Punctuation, using(this))),
+ (r'^%description', Name.Decorator, 'description'),
+ (r'^%changelog', Name.Decorator, 'changelog'),
+ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)),
+ (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|'
+ r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)',
+ Keyword),
+ include('interpol'),
+ (r"'.*?'", String.Single),
+ (r'"', String.Double, 'string'),
+ (r'\s+', Whitespace),
+ (r'.', Text),
+ ],
+ 'macro': [
+ (r'%define.*$', Comment.Preproc),
+ (r'%\{\!\?.*%define.*\}', Comment.Preproc),
+ (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$',
+ bygroups(Comment.Preproc, Text)),
+ ],
+ 'interpol': [
+ (r'%\{?__[a-z_]+\}?', Name.Function),
+ (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo),
+ (r'%\{\?\w+\}', Name.Variable),
+ (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global),
+ (r'%\{[a-zA-Z]\w+\}', Keyword.Constant),
+ ]
+ }
+
+
+class SourcesListLexer(RegexLexer):
+ """
+ Lexer that highlights debian sources.list files.
+
+ .. versionadded:: 0.7
+ """
+
+ name = 'Debian Sourcelist'
+ aliases = ['debsources', 'sourceslist', 'sources.list']
+ filenames = ['sources.list']
+ mimetypes = ['application/x-debian-sourceslist']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'#.*?$', Comment),
+ (r'^(deb(?:-src)?)(\s+)',
+ bygroups(Keyword, Whitespace), 'distribution')
+ ],
+ 'distribution': [
+ (r'#.*?$', Comment, '#pop'),
+ (r'\$\(ARCH\)', Name.Variable),
+ (r'[^\s$[]+', String),
+ (r'\[', String.Other, 'escaped-distribution'),
+ (r'\$', String),
+ (r'\s+', Whitespace, 'components')
+ ],
+ 'escaped-distribution': [
+ (r'\]', String.Other, '#pop'),
+ (r'\$\(ARCH\)', Name.Variable),
+ (r'[^\]$]+', String.Other),
+ (r'\$', String.Other)
+ ],
+ 'components': [
+ (r'#.*?$', Comment, '#pop:2'),
+ (r'$', Text, '#pop:2'),
+ (r'\s+', Whitespace),
+ (r'\S+', Keyword.Pseudo),
+ ]
+ }
+
+ def analyse_text(text):
+ for line in text.splitlines():
+ line = line.strip()
+ if line.startswith('deb ') or line.startswith('deb-src '):
+ return True
+
+
+class DebianControlLexer(RegexLexer):
+ """
+ Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
+
+ .. versionadded:: 0.9
+ """
+ name = 'Debian Control file'
+ url = 'https://www.debian.org/doc/debian-policy/ch-controlfields.html'
+ aliases = ['debcontrol', 'control']
+ filenames = ['control']
+
+ tokens = {
+ 'root': [
+ (r'^(Description)', Keyword, 'description'),
+ (r'^(Maintainer|Uploaders)(:\s*)', bygroups(Keyword, Text),
+ 'maintainer'),
+ (r'^((?:Build-|Pre-)?Depends(?:-Indep|-Arch)?)(:\s*)',
+ bygroups(Keyword, Text), 'depends'),
+ (r'^(Recommends|Suggests|Enhances)(:\s*)', bygroups(Keyword, Text),
+ 'depends'),
+ (r'^((?:Python-)?Version)(:\s*)(\S+)$',
+ bygroups(Keyword, Text, Number)),
+ (r'^((?:Installed-)?Size)(:\s*)(\S+)$',
+ bygroups(Keyword, Text, Number)),
+ (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$',
+ bygroups(Keyword, Text, Number)),
+ (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
+ bygroups(Keyword, Whitespace, String)),
+ ],
+ 'maintainer': [
+ (r'<[^>]+>$', Generic.Strong, '#pop'),
+ (r'<[^>]+>', Generic.Strong),
+ (r',\n?', Text),
+ (r'[^,<]+$', Text, '#pop'),
+ (r'[^,<]+', Text),
+ ],
+ 'description': [
+ (r'(.*)(Homepage)(: )(\S+)',
+ bygroups(Text, String, Name, Name.Class)),
+ (r':.*\n', Generic.Strong),
+ (r' .*\n', Text),
+ default('#pop'),
+ ],
+ 'depends': [
+ (r'(\$)(\{)(\w+\s*:\s*\w+)(\})',
+ bygroups(Operator, Text, Name.Entity, Text)),
+ (r'\(', Text, 'depend_vers'),
+ (r'\|', Operator),
+ (r',\n', Text),
+ (r'\n', Text, '#pop'),
+ (r'[,\s]', Text),
+ (r'[+.a-zA-Z0-9-]+', Name.Function),
+ (r'\[.*?\]', Name.Entity),
+ ],
+ 'depend_vers': [
+ (r'\)', Text, '#pop'),
+ (r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)),
+ ]
+ }
diff --git a/pygments/lexers/int_fiction.py b/pygments/lexers/int_fiction.py
new file mode 100644
index 0000000..a31a1e3
--- /dev/null
+++ b/pygments/lexers/int_fiction.py
@@ -0,0 +1,1382 @@
+"""
+ pygments.lexers.int_fiction
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for interactive fiction languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, \
+ this, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error, Generic
+
+__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
+ 'Tads3Lexer']
+
+
+class Inform6Lexer(RegexLexer):
+ """
+ For Inform 6 source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 6'
+ url = 'http://inform-fiction.org/'
+ aliases = ['inform6', 'i6']
+ filenames = ['*.inf']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ _name = r'[a-zA-Z_]\w*'
+
+ # Inform 7 maps these four character classes to their ASCII
+ # equivalents. To support Inform 6 inclusions within Inform 7,
+ # Inform6Lexer maps them too.
+ _dash = '\\-\u2010-\u2014'
+ _dquote = '"\u201c\u201d'
+ _squote = "'\u2018\u2019"
+ _newline = '\\n\u0085\u2028\u2029'
+
+ tokens = {
+ 'root': [
+ (r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
+ 'directive'),
+ default('directive')
+ ],
+ '_whitespace': [
+ (r'\s+', Text),
+ (r'![^%s]*' % _newline, Comment.Single)
+ ],
+ 'default': [
+ include('_whitespace'),
+ (r'\[', Punctuation, 'many-values'), # Array initialization
+ (r':|(?=;)', Punctuation, '#pop'),
+ (r'<', Punctuation), # Second angle bracket in an action statement
+ default(('expression', '_expression'))
+ ],
+
+ # Expressions
+ '_expression': [
+ include('_whitespace'),
+ (r'(?=sp\b)', Text, '#pop'),
+ (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
+ ('#pop', 'value')),
+ (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
+ (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
+ ],
+ 'expression': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('expression', '_expression')),
+ (r'\)', Punctuation, '#pop'),
+ (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
+ (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
+ (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
+ (r',', Punctuation, '_expression'),
+ (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
+ Operator, '_expression'),
+ (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
+ '_expression'),
+ (r'sp\b', Name),
+ (r'\?~?', Name.Label, 'label?'),
+ (r'[@{]', Error),
+ default('#pop')
+ ],
+ '_assembly-expression': [
+ (r'\(', Punctuation, ('#push', '_expression')),
+ (r'[\[\]]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, '_expression'),
+ (r'sp\b', Keyword.Pseudo),
+ (r';', Punctuation, '#pop:3'),
+ include('expression')
+ ],
+ '_for-expression': [
+ (r'\)', Punctuation, '#pop:2'),
+ (r':', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_keyword-expression': [
+ (r'(from|near|to)\b', Keyword, '_expression'),
+ include('expression')
+ ],
+ '_list-expression': [
+ (r',', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_object-expression': [
+ (r'has\b', Keyword.Declaration, '#pop'),
+ include('_list-expression')
+ ],
+
+ # Values
+ 'value': [
+ include('_whitespace'),
+ # Strings
+ (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
+ (r'([%s])(@\{[0-9a-fA-F]*\})([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'([%s])(@.{2})([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
+ # Numbers
+ (r'\$[<>]?[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
+ Number.Float, '#pop'),
+ (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
+ (r'\$\$[01]+', Number.Bin, '#pop'),
+ (r'[0-9]+', Number.Integer, '#pop'),
+ # Values prefixed by hashes
+ (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
+ (r'(#g\$)(%s)' % _name,
+ bygroups(Operator, Name.Variable.Global), '#pop'),
+ (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
+ (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
+ (r'#', Name.Builtin, ('#pop', 'system-constant')),
+ # System functions
+ (words((
+ 'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
+ 'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
+ Name.Builtin, '#pop'),
+ # Metaclasses
+ (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
+ # Veneer routines
+ (words((
+ 'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
+ 'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
+ 'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
+ 'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
+ 'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
+ 'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
+ 'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
+ 'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
+ 'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
+ prefix='(?i)', suffix=r'\b'),
+ Name.Builtin, '#pop'),
+ # Other built-in symbols
+ (words((
+ 'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
+ 'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'DOUBLE_HI_INFINITY',
+ 'DOUBLE_HI_NAN', 'DOUBLE_HI_NINFINITY', 'DOUBLE_LO_INFINITY', 'DOUBLE_LO_NAN',
+ 'DOUBLE_LO_NINFINITY', 'false', 'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY',
+ 'GOBJFIELD_CHAIN', 'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
+ 'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
+ 'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
+ 'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
+ 'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
+ 'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
+ 'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
+ 'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
+ prefix='(?i)', suffix=r'\b'),
+ Name.Builtin, '#pop'),
+ # Other values
+ (_name, Name, '#pop')
+ ],
+ 'value?': [
+ include('value'),
+ default('#pop')
+ ],
+ # Strings
+ 'dictionary-word': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _squote, String.Single),
+ (r'[({]', String.Single),
+ (r'@\{[0-9a-fA-F]*\}', String.Escape),
+ (r'@.{2}', String.Escape),
+ (r'[%s]' % _squote, String.Single, '#pop')
+ ],
+ 'string': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _dquote, String.Double),
+ (r'[({]', String.Double),
+ (r'\\', String.Escape),
+ (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
+ (_newline, _newline), String.Escape),
+ (r'@(\\\s*[%s]\s*)*[({]((\\\s*[%s]\s*)*[0-9a-zA-Z_])*'
+ r'(\\\s*[%s]\s*)*[)}]' % (_newline, _newline, _newline),
+ String.Escape),
+ (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
+ String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ 'plain-string': [
+ (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
+ (r'[~^({\[\]]', String.Double),
+ (r'\\', String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ # Names
+ '_constant': [
+ include('_whitespace'),
+ (_name, Name.Constant, '#pop'),
+ include('value')
+ ],
+ 'constant*': [
+ include('_whitespace'),
+ (r',', Punctuation),
+ (r'=', Punctuation, 'value?'),
+ (_name, Name.Constant, 'value?'),
+ default('#pop')
+ ],
+ '_global': [
+ include('_whitespace'),
+ (_name, Name.Variable.Global, '#pop'),
+ include('value')
+ ],
+ 'label?': [
+ include('_whitespace'),
+ (_name, Name.Label, '#pop'),
+ default('#pop')
+ ],
+ 'variable?': [
+ include('_whitespace'),
+ (_name, Name.Variable, '#pop'),
+ default('#pop')
+ ],
+ # Values after hashes
+ 'obsolete-dictionary-word': [
+ (r'\S\w*', String.Other, '#pop')
+ ],
+ 'system-constant': [
+ include('_whitespace'),
+ (_name, Name.Builtin, '#pop')
+ ],
+
+ # Directives
+ 'directive': [
+ include('_whitespace'),
+ (r'#', Punctuation),
+ (r';', Punctuation, '#pop'),
+ (r'\[', Punctuation,
+ ('default', 'statements', 'locals', 'routine-name?')),
+ (words((
+ 'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
+ 'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
+ 'version'), prefix='(?i)', suffix=r'\b'),
+ Keyword, 'default'),
+ (r'(?i)(array|global)\b', Keyword,
+ ('default', 'directive-keyword?', '_global')),
+ (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
+ (r'(?i)class\b', Keyword,
+ ('object-body', 'duplicates', 'class-name')),
+ (r'(?i)(constant|default)\b', Keyword,
+ ('default', 'constant*')),
+ (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
+ (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
+ (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
+ (r'(?i)import\b', Keyword, 'manifest'),
+ (r'(?i)(include|link|origsource)\b', Keyword,
+ ('default', 'before-plain-string?')),
+ (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
+ (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
+ (r'(?i)(nearby|object)\b', Keyword,
+ ('object-body', '_object-head')),
+ (r'(?i)property\b', Keyword,
+ ('default', 'alias?', '_constant', 'property-keyword*')),
+ (r'(?i)replace\b', Keyword,
+ ('default', 'routine-name?', 'routine-name?')),
+ (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
+ (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
+ (r'(?i)trace\b', Keyword,
+ ('default', 'trace-keyword?', 'trace-keyword?')),
+ (r'(?i)zcharacter\b', Keyword,
+ ('default', 'directive-keyword?', 'directive-keyword?')),
+ (_name, Name.Class, ('object-body', '_object-head'))
+ ],
+ # [, Replace, Stub
+ 'routine-name?': [
+ include('_whitespace'),
+ (_name, Name.Function, '#pop'),
+ default('#pop')
+ ],
+ 'locals': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'\*', Punctuation),
+ (r'"', String.Double, 'plain-string'),
+ (_name, Name.Variable)
+ ],
+ # Array
+ 'many-values': [
+ include('_whitespace'),
+ (r';', Punctuation),
+ (r'\]', Punctuation, '#pop'),
+ (r':', Error),
+ default(('expression', '_expression'))
+ ],
+ # Attribute, Property
+ 'alias?': [
+ include('_whitespace'),
+ (r'alias\b', Keyword, ('#pop', '_constant')),
+ default('#pop')
+ ],
+ # Class, Object, Nearby
+ 'class-name': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class, '#pop')
+ ],
+ 'duplicates': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
+ default('#pop')
+ ],
+ '_object-head': [
+ (r'[%s]>' % _dash, Punctuation),
+ (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
+ include('_global')
+ ],
+ 'object-body': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop:2'),
+ (r',', Punctuation),
+ (r'class\b', Keyword.Declaration, 'class-segment'),
+ (r'(has|private|with)\b', Keyword.Declaration),
+ (r':', Error),
+ default(('_object-expression', '_expression'))
+ ],
+ 'class-segment': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class),
+ default('value')
+ ],
+ # Extend, Verb
+ 'grammar': [
+ include('_whitespace'),
+ (r'=', Punctuation, ('#pop', 'default')),
+ (r'\*', Punctuation, ('#pop', 'grammar-line')),
+ default('_directive-keyword')
+ ],
+ 'grammar-line': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'[/*]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, 'value'),
+ (r'(noun|scope)\b', Keyword, '=routine'),
+ default('_directive-keyword')
+ ],
+ '=routine': [
+ include('_whitespace'),
+ (r'=', Punctuation, 'routine-name?'),
+ default('#pop')
+ ],
+ # Import
+ 'manifest': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'(?i)global\b', Keyword, '_global'),
+ default('_global')
+ ],
+ # Include, Link, Message
+ 'diagnostic': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
+ default(('#pop', 'before-plain-string?', 'directive-keyword?'))
+ ],
+ 'before-plain-string?': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string')),
+ default('#pop')
+ ],
+ 'message-string': [
+ (r'[~^]+', String.Escape),
+ include('plain-string')
+ ],
+
+ # Keywords used in directives
+ '_directive-keyword!': [
+ include('_whitespace'),
+ (words((
+ 'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
+ 'first', 'has', 'held', 'individual', 'initial', 'initstr', 'last', 'long', 'meta',
+ 'multi', 'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only',
+ 'private', 'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table',
+ 'terminating', 'time', 'topic', 'warning', 'with'), suffix=r'\b'),
+ Keyword, '#pop'),
+ (r'static\b', Keyword),
+ (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
+ ],
+ '_directive-keyword': [
+ include('_directive-keyword!'),
+ include('value')
+ ],
+ 'directive-keyword?': [
+ include('_directive-keyword!'),
+ default('#pop')
+ ],
+ 'property-keyword*': [
+ include('_whitespace'),
+ (words(('additive', 'individual', 'long'),
+ suffix=r'\b(?=(\s*|(![^%s]*[%s]))*[_a-zA-Z])' % (_newline, _newline)),
+ Keyword),
+ default('#pop')
+ ],
+ 'trace-keyword?': [
+ include('_whitespace'),
+ (words((
+ 'assembly', 'dictionary', 'expressions', 'lines', 'linker',
+ 'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
+ Keyword, '#pop'),
+ default('#pop')
+ ],
+
+ # Statements
+ 'statements': [
+ include('_whitespace'),
+ (r'\]', Punctuation, '#pop'),
+ (r'[;{}]', Punctuation),
+ (words((
+ 'box', 'break', 'continue', 'default', 'give', 'inversion',
+ 'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
+ 'spaces', 'string', 'until'), suffix=r'\b'),
+ Keyword, 'default'),
+ (r'(do|else)\b', Keyword),
+ (r'(font|style)\b', Keyword,
+ ('default', 'miscellaneous-keyword?')),
+ (r'for\b', Keyword, ('for', '(?')),
+ (r'(if|switch|while)', Keyword,
+ ('expression', '_expression', '(?')),
+ (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
+ (r'objectloop\b', Keyword,
+ ('_keyword-expression', 'variable?', '(?')),
+ (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
+ (r'\.', Name.Label, 'label?'),
+ (r'@', Keyword, 'opcode'),
+ (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
+ (r'<', Punctuation, 'default'),
+ (r'move\b', Keyword,
+ ('default', '_keyword-expression', '_expression')),
+ default(('default', '_keyword-expression', '_expression'))
+ ],
+ 'miscellaneous-keyword?': [
+ include('_whitespace'),
+ (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
+ Keyword, '#pop'),
+ (r'(a|A|an|address|char|name|number|object|property|string|the|'
+ r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
+ '#pop'),
+ (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
+ '#pop'),
+ default('#pop')
+ ],
+ '(?': [
+ include('_whitespace'),
+ (r'\(', Punctuation, '#pop'),
+ default('#pop')
+ ],
+ 'for': [
+ include('_whitespace'),
+ (r';', Punctuation, ('_for-expression', '_expression')),
+ default(('_for-expression', '_expression'))
+ ],
+ 'print-list': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r':', Error),
+ default(('_list-expression', '_expression', '_list-expression', 'form'))
+ ],
+ 'form': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
+ default('#pop')
+ ],
+
+ # Assembly
+ 'opcode': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
+ (_name, Keyword, 'operands')
+ ],
+ 'operands': [
+ (r':', Error),
+ default(('_assembly-expression', '_expression'))
+ ]
+ }
+
+ def get_tokens_unprocessed(self, text):
+ # 'in' is either a keyword or an operator.
+ # If the token two tokens after 'in' is ')', 'in' is a keyword:
+ # objectloop(a in b)
+ # Otherwise, it is an operator:
+ # objectloop(a in b && true)
+ objectloop_queue = []
+ objectloop_token_count = -1
+ previous_token = None
+ for index, token, value in RegexLexer.get_tokens_unprocessed(self,
+ text):
+ if previous_token is Name.Variable and value == 'in':
+ objectloop_queue = [[index, token, value]]
+ objectloop_token_count = 2
+ elif objectloop_token_count > 0:
+ if token not in Comment and token not in Text:
+ objectloop_token_count -= 1
+ objectloop_queue.append((index, token, value))
+ else:
+ if objectloop_token_count == 0:
+ if objectloop_queue[-1][2] == ')':
+ objectloop_queue[0][1] = Keyword
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
+ objectloop_token_count = -1
+ yield index, token, value
+ if token not in Comment and token not in Text:
+ previous_token = token
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
+
+ def analyse_text(text):
+ """We try to find a keyword which seems relatively common;
+ unfortunately there is a decent overlap with Smalltalk keywords here."""
+ result = 0
+ if re.search(r'\borigsource\b', text, re.IGNORECASE):
+ result += 0.05
+
+ return result
+
+
+class Inform7Lexer(RegexLexer):
+ """
+ For Inform 7 source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 7'
+ url = 'http://inform7.com/'
+ aliases = ['inform7', 'i7']
+ filenames = ['*.ni', '*.i7x']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ _dash = Inform6Lexer._dash
+ _dquote = Inform6Lexer._dquote
+ _newline = Inform6Lexer._newline
+ _start = r'\A|(?<=[%s])' % _newline
+
+ # There are three variants of Inform 7, differing in how to
+ # interpret at signs and braces in I6T. In top-level inclusions, at
+ # signs in the first column are inweb syntax. In phrase definitions
+ # and use options, tokens in braces are treated as I7. Use options
+ # also interpret "{N}".
+ tokens = {}
+ token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
+
+ for level in token_variants:
+ tokens[level] = {
+ '+i6-root': list(Inform6Lexer.tokens['root']),
+ '+i6t-root': [ # For Inform6TemplateLexer
+ (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
+ ('directive', '+p'))
+ ],
+ 'root': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]' % _dquote, Generic.Heading,
+ ('+main', '+titling', '+titling-string')),
+ default(('+main', '+heading?'))
+ ],
+ '+titling-string': [
+ (r'[^%s]+' % _dquote, Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '#pop')
+ ],
+ '+titling': [
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
+ (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
+ Text, ('#pop', '+heading?')),
+ (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
+ (r'[|%s]' % _newline, Generic.Heading)
+ ],
+ '+main': [
+ (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
+ (r'[%s]' % _dquote, String.Double, '+text'),
+ (r':', Text, '+phrase-definition'),
+ (r'(?i)\bas\b', Text, '+use-option'),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-not-inline'), Punctuation)),
+ (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
+ (_start, _dquote, _newline), Text, '+heading?'),
+ (r'(?i)[a(|%s]' % _newline, Text)
+ ],
+ '+phrase-definition': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive',
+ 'default', 'statements'),
+ i6t='+i6t-inline'), Punctuation), '#pop'),
+ default('#pop')
+ ],
+ '+use-option': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-use-option'), Punctuation), '#pop'),
+ default('#pop')
+ ],
+ '+comment': [
+ (r'[^\[\]]+', Comment.Multiline),
+ (r'\[', Comment.Multiline, '#push'),
+ (r'\]', Comment.Multiline, '#pop')
+ ],
+ '+text': [
+ (r'[^\[%s]+' % _dquote, String.Double),
+ (r'\[.*?\]', String.Interpol),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ '+heading?': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
+ (r'[%s]{1,3}' % _dash, Text),
+ (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
+ Generic.Heading, '#pop'),
+ default('#pop')
+ ],
+ '+documentation-heading': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(?i)documentation\s+', Text, '+documentation-heading2'),
+ default('#pop')
+ ],
+ '+documentation-heading2': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s' % _dash, Text, '+documentation'),
+ default('#pop:2')
+ ],
+ '+documentation': [
+ (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
+ (_start, _newline), Generic.Heading),
+ (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
+ Generic.Subheading),
+ (r'((%s)\t.*?[%s])+' % (_start, _newline),
+ using(this, state='+main')),
+ (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ ],
+ '+i6t-not-inline': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p')
+ ],
+ '+i6t-use-option': [
+ include('+i6t-not-inline'),
+ (r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
+ ],
+ '+i6t-inline': [
+ (r'(\{)(\S[^}]*)?(\})',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+i6t': [
+ (r'(\{[%s])(![^}]*)(\}?)' % _dash,
+ bygroups(Punctuation, Comment.Single, Punctuation)),
+ (r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation), '+lines'),
+ (r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation)),
+ (r'(\(\+)(.*?)(\+\)|\Z)',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+p': [
+ (r'[^@]+', Comment.Preproc),
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc, '#pop'),
+ (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading),
+ (r'@', Comment.Preproc)
+ ],
+ '+lines': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p'),
+ (r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
+ (r'![^%s]*' % _newline, Comment.Single),
+ (r'(\{)([%s]endlines)(\})' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation), '#pop'),
+ (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
+ ]
+ }
+ # Inform 7 can include snippets of Inform 6 template language,
+ # so all of Inform6Lexer's states are copied here, with
+ # modifications to account for template syntax. Inform7Lexer's
+ # own states begin with '+' to avoid name conflicts. Some of
+ # Inform6Lexer's states begin with '_': these are not modified.
+ # They deal with template syntax either by including modified
+ # states, or by matching r'' then pushing to modified states.
+ for token in Inform6Lexer.tokens:
+ if token == 'root':
+ continue
+ tokens[level][token] = list(Inform6Lexer.tokens[token])
+ if not token.startswith('_'):
+ tokens[level][token][:0] = [include('+i6t'), include(level)]
+
+ def __init__(self, **options):
+ level = options.get('i6t', '+i6t-not-inline')
+ if level not in self._all_tokens:
+ self._tokens = self.__class__.process_tokendef(level)
+ else:
+ self._tokens = self._all_tokens[level]
+ RegexLexer.__init__(self, **options)
+
+
class Inform6TemplateLexer(Inform7Lexer):
    """
    For Inform 6 template code.

    .. versionadded:: 2.0
    """

    filenames = ['*.i6t']
    aliases = ['i6t']
    name = 'Inform 6 template'

    def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
        # Tokenize exactly as Inform7Lexer does, but start from the i6t
        # template root state instead of Inform 7's own root state.
        return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
+
+
class Tads3Lexer(RegexLexer):
    """
    For TADS 3 source code.
    """

    name = 'TADS 3'
    aliases = ['tads3']
    filenames = ['*.t']

    flags = re.DOTALL | re.MULTILINE

    # Reusable regex fragments.  A single-line comment may be continued
    # onto the next line with a trailing backslash.
    _comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
    _comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
    # String escape sequences: single-character escapes, \uXXXX, \xXX,
    # and octal escapes.
    _escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
               r'[0-3]?[0-7]{1,2}))')
    _name = r'(?:[_a-zA-Z]\w*)'
    _no_quote = r'(?=\s|\\?>)'
    _operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
                 r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
    _ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
    _ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)

    def _make_string_state(triple, double, verbatim=None, _escape=_escape):
        """Build the rule list for a string state.

        ``triple``/``double`` select between ''', \"\"\", ' and " strings;
        ``verbatim``, if given, is the name of an HTML-like verbatim tag
        (e.g. ``listing`` or ``xmp``) whose closing tag ends the state.
        """
        if verbatim:
            verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
                                               re.escape(c.upper()))
                                for c in verbatim])
        char = r'"' if double else r"'"
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
        tag_state_name = '%sqt' % prefix
        state = []
        if triple:
            state += [
                (r'%s{3,}' % char, token, '#pop'),
                (r'\\%s+' % char, String.Escape),
                (char, token)
            ]
        else:
            state.append((char, token, '#pop'))
        state += [
            include('s/verbatim'),
            (r'[^\\<&{}%s]+' % char, token)
        ]
        if verbatim:
            # This regex can't use `(?i)` because escape sequences are
            # case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
            state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
                          (_escape, verbatim),
                          Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
        else:
            state += [
                (r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
                 (char, char, escaped_quotes, _escape), Comment.Multiline),
                (r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/listing' % prefix, tag_state_name)),
                (r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/xmp' % prefix, tag_state_name)),
                (r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
                 (char, char, escaped_quotes, _escape), Name.Tag,
                 tag_state_name),
                include('s/entity')
            ]
        state += [
            include('s/escape'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'[\\&{}<]', token)
        ]
        return state

    def _make_tag_state(triple, double, _escape=_escape):
        """Build the rule list for an HTML-like tag state opened inside a
        string of the given quoting style."""
        char = r'"' if double else r"'"
        quantifier = r'{3,}' if triple else r''
        state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        return [
            (r'%s%s' % (char, quantifier), token, '#pop:2'),
            (r'(\s|\\\n)+', Text),
            (r'(=)(\\?")', bygroups(Punctuation, String.Double),
             'dqs/%s' % state_name),
            (r"(=)(\\?')", bygroups(Punctuation, String.Single),
             'sqs/%s' % state_name),
            (r'=', Punctuation, 'uqs/%s' % state_name),
            (r'\\?>', Name.Tag, '#pop'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
             (char, char, escaped_quotes, _escape), Name.Attribute),
            include('s/escape'),
            include('s/verbatim'),
            include('s/entity'),
            (r'[\\{}&]', Name.Attribute)
        ]

    def _make_attribute_value_state(terminator, host_triple, host_double,
                                    _escape=_escape):
        """Build the rule list for a tag-attribute value, where
        ``terminator`` is the quote character (or ``_no_quote`` for
        unquoted values) and the host string style determines when the
        whole enclosing string ends."""
        token = (String.Double if terminator == r'"' else
                 String.Single if terminator == r"'" else String.Other)
        host_char = r'"' if host_double else r"'"
        host_quantifier = r'{3,}' if host_triple else r''
        host_token = String.Double if host_double else String.Single
        escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
                          if host_triple else r'')
        return [
            (r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
            (r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
             token, '#pop'),
            include('s/verbatim'),
            include('s/entity'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (host_char, host_char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
             token),
            include('s/escape'),
            (r'["\'\s&{<}\\]', token)
        ]

    # NOTE: rule order within each state is significant; earlier rules
    # take priority over later ones.
    tokens = {
        'root': [
            ('\ufeff', Text),
            (r'\{', Punctuation, 'object-body'),
            (r';+', Punctuation),
            (r'(?=(argcount|break|case|catch|continue|default|definingobj|'
             r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
             r'invokee|local|nil|new|operator|replaced|return|self|switch|'
             r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
            (r'(%s)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?/root', 'more/parameters', 'main/parameters')),
            include('whitespace'),
            (r'\++', Punctuation),
            (r'[^\s!"%-(*->@-_a-z{-~]+', Error),  # Averts an infinite loop
            (r'(?!\Z)', Text, 'main/root')
        ],
        'main/root': [
            include('main/basic'),
            default(('#pop', 'object-body/no-braces', 'classes', 'class'))
        ],
        'object-body/no-braces': [
            (r';', Punctuation, '#pop'),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('object-body')
        ],
        'object-body': [
            (r';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r':', Punctuation, ('classes', 'class')),
            (r'(%s?)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?', 'more/parameters', 'main/parameters')),
            (r'(%s)(%s*)(\{)' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation), 'block'),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation),
             ('object-body/no-braces', 'classes', 'class')),
            include('whitespace'),
            (r'->|%s' % _operator, Punctuation, 'main'),
            default('main/object-body')
        ],
        'main/object-body': [
            include('main/basic'),
            (r'(%s)(%s*)(=?)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation), ('#pop', 'more', 'main')),
            default('#pop:2')
        ],
        'block?/root': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            (r'(?=[\[\'"<(:])', Text,  # It might be a VerbRule macro.
             ('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
            # It might be a macro like DefineAction.
            default(('#pop', 'object-body/no-braces'))
        ],
        'block?': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            default('#pop')
        ],
        'block/basic': [
            (r'[;:]+', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'default\b', Keyword.Reserved),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Label, using(this, state='whitespace'),
                      Punctuation)),
            include('whitespace')
        ],
        'block': [
            include('block/basic'),
            (r'(?!\Z)', Text, ('more', 'main'))
        ],
        'block/embed': [
            (r'>>', String.Interpol, '#pop'),
            include('block/basic'),
            (r'(?!\Z)', Text, ('more/embed', 'main'))
        ],
        'main/basic': [
            include('whitespace'),
            (r'\(', Punctuation, ('#pop', 'more', 'main')),
            (r'\[', Punctuation, ('#pop', 'more/list', 'main')),
            (r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
                                  'more/parameters', 'main/parameters')),
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
            (r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
             Number.Float, '#pop'),
            (r'0[0-7]+', Number.Oct, '#pop'),
            (r'\d+', Number.Integer, '#pop'),
            (r'"""', String.Double, ('#pop', 'tdqs')),
            (r"'''", String.Single, ('#pop', 'tsqs')),
            (r'"', String.Double, ('#pop', 'dqs')),
            (r"'", String.Single, ('#pop', 'sqs')),
            (r'R"""', String.Regex, ('#pop', 'tdqr')),
            (r"R'''", String.Regex, ('#pop', 'tsqr')),
            (r'R"', String.Regex, ('#pop', 'dqr')),
            (r"R'", String.Regex, ('#pop', 'sqr')),
            # Two-token keywords
            (r'(extern)(%s+)(object\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved)),
            (r'(function|method)(%s*)(\()' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Punctuation),
             ('#pop', 'block?', 'more/parameters', 'main/parameters')),
            (r'(modify)(%s+)(grammar\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved),
             ('#pop', 'object-body/no-braces', ':', 'grammar')),
            (r'(new)(%s+(?=(?:function|method)\b))' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'))),
            (r'(object)(%s+)(template\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'template')),
            (r'(string)(%s+)(template\b)' % _ws,
             bygroups(Keyword, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'function-name')),
            # Keywords
            (r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
             Name.Builtin, '#pop'),
            (r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
            (r'(case|extern|if|intrinsic|return|static|while)\b',
             Keyword.Reserved),
            (r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
            (r'class\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'class')),
            (r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
            (r'(dictionary|property)\b', Keyword.Reserved,
             ('#pop', 'constants')),
            (r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
            (r'export\b', Keyword.Reserved, ('#pop', 'main')),
            (r'(for|foreach)\b', Keyword.Reserved,
             ('#pop', 'more/inner', 'main/inner')),
            (r'(function|method)\b', Keyword.Reserved,
             ('#pop', 'block?', 'function-name')),
            (r'grammar\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'grammar')),
            (r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
            (r'local\b', Keyword.Reserved,
             ('#pop', 'more/local', 'main/local')),
            (r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
             '#pop'),
            (r'new\b', Keyword.Reserved, ('#pop', 'class')),
            (r'(nil|true)\b', Keyword.Constant, '#pop'),
            (r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
            (r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
            (r'propertyset\b', Keyword.Reserved,
             ('#pop', 'propertyset', 'main')),
            (r'self\b', Name.Builtin.Pseudo, '#pop'),
            (r'template\b', Keyword.Reserved, ('#pop', 'template')),
            # Operators
            (r'(__objref|defined)(%s*)(\()' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator), ('#pop', 'more/__objref', 'main')),
            (r'delegated\b', Operator.Word),
            # Compiler-defined macros and built-in properties
            (r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
             r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
             r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
             r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
             r'miscVocab|sourceTextGroup|sourceTextGroupName|'
             r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
        ],
        'main': [
            include('main/basic'),
            (_name, Name, '#pop'),
            default('#pop')
        ],
        'more/basic': [
            (r'\(', Punctuation, ('more/list', 'main')),
            (r'\[', Punctuation, ('more', 'main')),
            (r'\.{3}', Punctuation),
            (r'->|\.\.', Punctuation, 'main'),
            (r'(?=;)|[:)\]]', Punctuation, '#pop'),
            include('whitespace'),
            (_operator, Operator, 'main'),
            (r'\?', Operator, ('main', 'more/conditional', 'main')),
            (r'(is|not)(%s+)(in\b)' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator.Word)),
            (r'[^\s!"%-_a-z{-~]+', Error)  # Averts an infinite loop
        ],
        'more': [
            include('more/basic'),
            default('#pop')
        ],
        # Then expression (conditional operator)
        'more/conditional': [
            (r':(?!:)', Operator, '#pop'),
            include('more')
        ],
        # Embedded expressions
        'more/embed': [
            (r'>>', String.Interpol, '#pop:2'),
            include('more')
        ],
        # For/foreach loop initializer or short-form anonymous function
        'main/inner': [
            (r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
            (r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
            include('main')
        ],
        'more/inner': [
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation, 'main/inner'),
            (r'(in|step)\b', Keyword, 'main/inner'),
            include('more')
        ],
        # Local
        'main/local': [
            (_name, Name.Variable, '#pop'),
            include('whitespace')
        ],
        'more/local': [
            (r',', Punctuation, 'main/local'),
            include('more')
        ],
        # List
        'more/list': [
            (r'[,:]', Punctuation, 'main'),
            include('more')
        ],
        # Parameter list
        'main/parameters': [
            (r'(%s)(%s*)(?=:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
            (r'(%s)(%s+)(%s)' % (_name, _ws, _name),
             bygroups(Name.Class, using(this, state='whitespace'),
                      Name.Variable), '#pop'),
            (r'\[+', Punctuation),
            include('main/basic'),
            (_name, Name.Variable, '#pop'),
            default('#pop')
        ],
        'more/parameters': [
            (r'(:)(%s*(?=[?=,:)]))' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'))),
            (r'[?\]]+', Punctuation),
            (r'[:)]', Punctuation, ('#pop', 'multimethod?')),
            (r',', Punctuation, 'main/parameters'),
            (r'=', Punctuation, ('more/parameter', 'main')),
            include('more')
        ],
        'more/parameter': [
            (r'(?=[,)])', Text, '#pop'),
            include('more')
        ],
        'multimethod?': [
            (r'multimethod\b', Keyword, '#pop'),
            include('whitespace'),
            default('#pop')
        ],

        # Statements and expressions
        'more/__objref': [
            (r',', Punctuation, 'mode'),
            (r'\)', Operator, '#pop'),
            include('more')
        ],
        'mode': [
            (r'(error|warn)\b', Keyword, '#pop'),
            include('whitespace')
        ],
        'catch': [
            (r'\(+', Punctuation),
            (_name, Name.Exception, ('#pop', 'variables')),
            include('whitespace')
        ],
        'enum': [
            include('whitespace'),
            (r'token\b', Keyword, ('#pop', 'constants')),
            default(('#pop', 'constants'))
        ],
        'grammar': [
            (r'\)+', Punctuation),
            (r'\(', Punctuation, 'grammar-tag'),
            (r':', Punctuation, 'grammar-rules'),
            (_name, Name.Class),
            include('whitespace')
        ],
        'grammar-tag': [
            include('whitespace'),
            (r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
             r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
             r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
             r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
             r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
             r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
             r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
        ],
        'grammar-rules': [
            include('string'),
            include('whitespace'),
            (r'(\[)(%s*)(badness)' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'), Keyword),
             'main'),
            (r'->|%s|[()]' % _operator, Punctuation),
            (_name, Name.Constant),
            default('#pop:2')
        ],
        ':': [
            (r':', Punctuation, '#pop')
        ],
        'function-name': [
            (r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
            (r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
            (_name, Name.Function, '#pop'),
            include('whitespace')
        ],
        'inherited': [
            (r'<', Punctuation, ('#pop', 'classes', 'class')),
            include('whitespace'),
            (_name, Name.Class, '#pop'),
            default('#pop')
        ],
        'operator': [
            (r'negate\b', Operator.Word, '#pop'),
            include('whitespace'),
            (_operator, Operator),
            default('#pop')
        ],
        'propertyset': [
            (r'\(', Punctuation, ('more/parameters', 'main/parameters')),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('whitespace')
        ],
        'template': [
            (r'(?=;)', Text, '#pop'),
            include('string'),
            (r'inherited\b', Keyword.Reserved),
            include('whitespace'),
            (r'->|\?|%s' % _operator, Punctuation),
            (_name, Name.Variable)
        ],

        # Identifiers
        'class': [
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'object\b', Keyword.Reserved, '#pop'),
            (r'transient\b', Keyword.Reserved),
            (_name, Name.Class, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'classes': [
            (r'[:,]', Punctuation, 'class'),
            include('whitespace'),
            (r'>', Punctuation, '#pop'),
            default('#pop')
        ],
        'constants': [
            (r',+', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'property\b', Keyword.Reserved),
            (_name, Name.Constant),
            include('whitespace')
        ],
        'label': [
            (_name, Name.Label, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'variables': [
            (r',+', Punctuation),
            (r'\)', Punctuation, '#pop'),
            include('whitespace'),
            (_name, Name.Variable)
        ],

        # Whitespace and comments
        'whitespace': [
            (r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
             Comment.Preproc),
            (_comment_single, Comment.Single),
            (_comment_multiline, Comment.Multiline),
            (r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
        ],

        # Strings
        'string': [
            (r'"""', String.Double, 'tdqs'),
            (r"'''", String.Single, 'tsqs'),
            (r'"', String.Double, 'dqs'),
            (r"'", String.Single, 'sqs')
        ],
        's/escape': [
            (r'\{\{|\}\}|%s' % _escape, String.Escape)
        ],
        's/verbatim': [
            (r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
             r'first\s+time|one\s+of|only|or|otherwise|'
             r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
             r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
            (r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
             r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
             String.Interpol, ('block/embed', 'more/embed', 'main'))
        ],
        's/entity': [
            (r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
        ],
        'tdqs': _make_string_state(True, True),
        'tsqs': _make_string_state(True, False),
        'dqs': _make_string_state(False, True),
        'sqs': _make_string_state(False, False),
        'tdqs/listing': _make_string_state(True, True, 'listing'),
        'tsqs/listing': _make_string_state(True, False, 'listing'),
        'dqs/listing': _make_string_state(False, True, 'listing'),
        'sqs/listing': _make_string_state(False, False, 'listing'),
        'tdqs/xmp': _make_string_state(True, True, 'xmp'),
        'tsqs/xmp': _make_string_state(True, False, 'xmp'),
        'dqs/xmp': _make_string_state(False, True, 'xmp'),
        'sqs/xmp': _make_string_state(False, False, 'xmp'),

        # Tags
        'tdqt': _make_tag_state(True, True),
        'tsqt': _make_tag_state(True, False),
        'dqt': _make_tag_state(False, True),
        'sqt': _make_tag_state(False, False),
        'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
        'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
        'dqs/dqt': _make_attribute_value_state(r'"', False, True),
        'dqs/sqt': _make_attribute_value_state(r'"', False, False),
        'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
        'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
        'sqs/dqt': _make_attribute_value_state(r"'", False, True),
        'sqs/sqt': _make_attribute_value_state(r"'", False, False),
        'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
        'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
        'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
        'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),

        # Regular expressions
        'tdqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"*', String.Regex),
            (r'"{3,}', String.Regex, '#pop'),
            (r'"', String.Regex)
        ],
        'tsqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'*", String.Regex),
            (r"'{3,}", String.Regex, '#pop'),
            (r"'", String.Regex)
        ],
        'dqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"?', String.Regex),
            (r'"', String.Regex, '#pop')
        ],
        'sqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'?", String.Regex),
            (r"'", String.Regex, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text, **kwargs):
        """Tokenize as usual, then re-label everything inside a false
        ``#if 0`` / ``#if nil`` preprocessor block as Comment, tracking
        nested ``#if``/``#elif``/``#else``/``#endif`` directives."""
        pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
        if_false_level = 0
        for index, token, value in (
                RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
            if if_false_level == 0:  # Not in a false #if
                if (token is Comment.Preproc and
                        re.match(r'%sif%s+(0|nil)%s*$\n?' %
                                 (pp, self._ws_pp, self._ws_pp), value)):
                    if_false_level = 1
            else:  # In a false #if
                if token is Comment.Preproc:
                    if (if_false_level == 1 and
                            re.match(r'%sel(if|se)\b' % pp, value)):
                        if_false_level = 0
                    elif re.match(r'%sif' % pp, value):
                        if_false_level += 1
                    elif re.match(r'%sendif\b' % pp, value):
                        if_false_level -= 1
                else:
                    # Non-directive tokens inside a false #if are demoted
                    # to plain Comment.
                    token = Comment
            yield index, token, value

    def analyse_text(text):
        """This is a rather generic descriptive language without strong
        identifiers. It looks like a 'GameMainDef' has to be present,
        and/or a 'versionInfo' with an 'IFID' field."""
        result = 0
        if '__TADS' in text or 'GameMainDef' in text:
            result += 0.2

        # This is a fairly unique keyword which is likely used in source as well
        if 'versionInfo' in text and 'IFID' in text:
            result += 0.1

        return result
diff --git a/pygments/lexers/iolang.py b/pygments/lexers/iolang.py
new file mode 100644
index 0000000..a760d1c
--- /dev/null
+++ b/pygments/lexers/iolang.py
@@ -0,0 +1,62 @@
+"""
+ pygments.lexers.iolang
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Io language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
+ Whitespace
+
+__all__ = ['IoLexer']
+
+
class IoLexer(RegexLexer):
    """
    For Io (a small, prototype-based programming language) source.

    .. versionadded:: 0.10
    """
    name = 'Io'
    url = 'http://iolanguage.com/'
    filenames = ['*.io']
    aliases = ['io']
    mimetypes = ['text/x-iosrc']
    # NOTE: rule order is significant; comments and strings must be
    # matched before operators and plain names.
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # Comments
            (r'//(.*?)$', Comment.Single),
            (r'#(.*?)$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # /+ ... +/ comments nest, so a dedicated state tracks the
            # nesting depth via the lexer state stack
            (r'/\+', Comment.Multiline, 'nestedcomment'),
            # DoubleQuotedString
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Operators
            (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
             Operator),
            # keywords
            (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
             Keyword),
            # constants
            (r'(nil|false|true)\b', Name.Constant),
            # names
            (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
             Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),
            # numbers
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer)
        ],
        'nestedcomment': [
            (r'[^+/]+', Comment.Multiline),
            # another /+ pushes a level; +/ pops one
            (r'/\+', Comment.Multiline, '#push'),
            (r'\+/', Comment.Multiline, '#pop'),
            (r'[+/]', Comment.Multiline),
        ]
    }
diff --git a/pygments/lexers/j.py b/pygments/lexers/j.py
new file mode 100644
index 0000000..7e5f9a1
--- /dev/null
+++ b/pygments/lexers/j.py
@@ -0,0 +1,152 @@
+"""
+ pygments.lexers.j
+ ~~~~~~~~~~~~~~~~~
+
+ Lexer for the J programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include, bygroups
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+
+__all__ = ['JLexer']
+
+
class JLexer(RegexLexer):
    """
    For J source code.

    .. versionadded:: 2.1
    """

    name = 'J'
    url = 'http://jsoftware.com/'
    aliases = ['j']
    filenames = ['*.ijs']
    mimetypes = ['text/x-j']

    # A J name: an ASCII letter followed by word characters.
    validName = r'\b[a-zA-Z]\w*'

    tokens = {
        'root': [
            # Shebang script
            (r'#!.*$', Comment.Preproc),

            # Comments
            (r'NB\..*', Comment.Single),
            (r'(\n+\s*)(Note)', bygroups(Whitespace, Comment.Multiline),
             'comment'),
            (r'(\s*)(Note.*)', bygroups(Whitespace, Comment.Single)),

            # Whitespace
            (r'\s+', Whitespace),

            # Strings
            (r"'", String, 'singlequote'),

            # Definitions
            (r'0\s+:\s*0', Name.Entity, 'nounDefinition'),
            (r'(noun)(\s+)(define)(\s*)$', bygroups(Name.Entity, Whitespace,
                                                    Name.Entity, Whitespace),
             'nounDefinition'),
            (r'([1-4]|13)\s+:\s*0\b',
             Name.Function, 'explicitDefinition'),
            (r'(adverb|conjunction|dyad|monad|verb)(\s+)(define)\b',
             bygroups(Name.Function, Whitespace, Name.Function),
             'explicitDefinition'),

            # Flow Control
            (words(('for_', 'goto_', 'label_'), suffix=validName+r'\.'), Name.Label),
            (words((
                'assert', 'break', 'case', 'catch', 'catchd',
                'catcht', 'continue', 'do', 'else', 'elseif',
                'end', 'fcase', 'for', 'if', 'return',
                'select', 'throw', 'try', 'while', 'whilst',
                ), suffix=r'\.'), Name.Label),

            # Variable Names
            (validName, Name.Variable),

            # Standard Library
            (words((
                'ARGV', 'CR', 'CRLF', 'DEL', 'Debug',
                'EAV', 'EMPTY', 'FF', 'JVERSION', 'LF',
                'LF2', 'Note', 'TAB', 'alpha17', 'alpha27',
                'apply', 'bind', 'boxopen', 'boxxopen', 'bx',
                'clear', 'cutLF', 'cutopen', 'datatype', 'def',
                'dfh', 'drop', 'each', 'echo', 'empty',
                'erase', 'every', 'evtloop', 'exit', 'expand',
                'fetch', 'file2url', 'fixdotdot', 'fliprgb', 'getargs',
                'getenv', 'hfd', 'inv', 'inverse', 'iospath',
                'isatty', 'isutf8', 'items', 'leaf', 'list',
                'nameclass', 'namelist', 'names', 'nc',
                'nl', 'on', 'pick', 'rows',
                'script', 'scriptd', 'sign', 'sminfo', 'smoutput',
                'sort', 'split', 'stderr', 'stdin', 'stdout',
                'table', 'take', 'timespacex', 'timex', 'tmoutput',
                'toCRLF', 'toHOST', 'toJ', 'tolower', 'toupper',
                'type', 'ucp', 'ucpcount', 'usleep', 'utf8',
                'uucp',
                )), Name.Function),

            # Copula
            (r'=[.:]', Operator),

            # Builtins
            (r'[-=+*#$%@!~`^&";:.,<>{}\[\]\\|/?]', Operator),

            # Short Keywords
            (r'[abCdDeEfHiIjLMoprtT]\.', Keyword.Reserved),
            (r'[aDiLpqsStux]\:', Keyword.Reserved),
            (r'(_[0-9])\:', Keyword.Constant),

            # Parens
            (r'\(', Punctuation, 'parentheses'),

            # Numbers
            include('numbers'),
        ],

        # Multi-line Note blocks end with a ')' at the start of a line.
        'comment': [
            (r'[^)]', Comment.Multiline),
            (r'^\)', Comment.Multiline, '#pop'),
            (r'[)]', Comment.Multiline),
        ],

        # Explicit definitions (n : 0 / verb define ...) run until a
        # line consisting of ')'.
        'explicitDefinition': [
            (r'\b[nmuvxy]\b', Name.Decorator),
            include('root'),
            (r'[^)]', Name),
            (r'^\)', Name.Label, '#pop'),
            (r'[)]', Name),
        ],

        # J numbers: '_' is negative sign / infinity, trailing 'x' marks
        # extended-precision integers.
        'numbers': [
            (r'\b_{1,2}\b', Number),
            (r'_?\d+(\.\d+)?(\s*[ejr]\s*)_?\d+(\.?=\d+)?', Number),
            (r'_?\d+\.(?=\d+)', Number.Float),
            (r'_?\d+x', Number.Integer.Long),
            (r'_?\d+', Number.Integer),
        ],

        # Noun (literal) definitions run until a line consisting of ')'.
        'nounDefinition': [
            (r'[^)]+', String),
            (r'^\)', Name.Label, '#pop'),
            (r'[)]', String),
        ],

        'parentheses': [
            (r'\)', Punctuation, '#pop'),
            # include('nounDefinition'),
            include('explicitDefinition'),
            include('root'),
        ],

        # Single-quoted strings; '' is an escaped quote.
        'singlequote': [
            (r"[^']+", String),
            (r"''", String),
            (r"'", String, '#pop'),
        ],
    }
diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
new file mode 100644
index 0000000..eed71f9
--- /dev/null
+++ b/pygments/lexers/javascript.py
@@ -0,0 +1,1588 @@
+"""
+ pygments.lexers.javascript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for JavaScript and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import bygroups, combined, default, do_insertions, include, \
+ inherit, Lexer, RegexLexer, this, using, words, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Other, Generic, Whitespace
+from pygments.util import get_bool_opt
+import pygments.unistring as uni
+
+__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
+ 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer',
+ 'CoffeeScriptLexer', 'MaskLexer', 'EarlGreyLexer', 'JuttleLexer',
+ 'NodeConsoleLexer']
+
# Regex fragment matching the first character of a JavaScript identifier:
# "$", "_", a Unicode letter (categories Lu/Ll/Lt/Lm/Lo) or letter number
# (Nl), or a \uXXXX escape sequence.
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
                  ']|\\\\u[a-fA-F0-9]{4})')
# Continuation characters additionally allow combining marks (Mn/Mc),
# decimal digits (Nd), connector punctuation (Pc, which includes "_"),
# and the zero-width joiner/non-joiner (U+200C/U+200D).
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                       'Mn', 'Mc', 'Nd', 'Pc') +
                 '\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
# A complete identifier: one start character followed by any number of
# continuation characters.
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
+
+
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.

    Regex literals are only recognized in positions where a division
    operator cannot appear; this is handled by pushing the
    ``slashstartsregex`` state after tokens that may precede a regex.
    """

    name = 'JavaScript'
    url = 'https://www.ecma-international.org/publications-and-standards/standards/ecma-262/'
    aliases = ['javascript', 'js']
    filenames = ['*.js', '*.jsm', '*.mjs', '*.cjs']
    mimetypes = ['application/javascript', 'application/x-javascript',
                 'text/x-javascript', 'text/javascript']

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Whitespace),
            (r'<!--', Comment),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered wherever a "/" would start a regex literal rather than a
        # division operator; pops back immediately if no regex follows.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Whitespace, '#pop')
        ],
        'root': [
            (r'\A#! ?/.*?$', Comment.Hashbang),  # recognized by node.js
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),

            # Numeric literals
            (r'0[bB][01]+n?', Number.Bin),
            (r'0[oO]?[0-7]+n?', Number.Oct),  # Browsers support "0o7" and "07" (< ES5) notations
            (r'0[xX][0-9a-fA-F]+n?', Number.Hex),
            (r'[0-9]+n', Number.Integer),  # Javascript BigInt requires an "n" postfix
            # Javascript doesn't have actual integer literals, so every other
            # numeric literal is handled by the regex below (including "normal")
            # integers
            (r'(\.[0-9]+|[0-9]+\.[0-9]*|[0-9]+)([eE][-+]?[0-9]+)?', Number.Float),

            (r'\.\.\.|=>', Punctuation),
            (r'\+\+|--|~|\?\?=?|\?|:|\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|(?:\*\*|\|\||&&|[-<>+*%&|^/]))=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            (r'(typeof|instanceof|in|void|delete|new)\b', Operator.Word, 'slashstartsregex'),

            # Match stuff like: constructor
            (r'\b(constructor|from|as)\b', Keyword.Reserved),

            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|yield|await|async|this|of|static|export|'
             r'import|debugger|extends|super)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|const|with|function|class)\b', Keyword.Declaration, 'slashstartsregex'),

            (r'(abstract|boolean|byte|char|double|enum|final|float|goto|'
             r'implements|int|interface|long|native|package|private|protected|'
             r'public|short|synchronized|throws|transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),

            (r'(Array|Boolean|Date|BigInt|Function|Math|ArrayBuffer|'
             r'Number|Object|RegExp|String|Promise|Proxy|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|DataView|'
             r'document|window|globalThis|global|Symbol|Intl|'
             r'WeakSet|WeakMap|Set|Map|Reflect|JSON|Atomics|'
             r'Int(?:8|16|32)Array|BigInt64Array|Float32Array|Float64Array|'
             r'Uint8ClampedArray|Uint(?:8|16|32)Array|BigUint64Array)\b', Name.Builtin),

            (r'((?:Eval|Internal|Range|Reference|Syntax|Type|URI)?Error)\b', Name.Exception),

            # Match stuff like: super(argument, list)
            # The argument list (group 3) is lexed recursively via
            # using(this); the previous two-element bygroups() supplied no
            # action for group 3, so bygroups silently dropped that text
            # from the token stream (bygroups only yields for the actions
            # it is given and then advances pos to match.end()).
            (r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
             bygroups(Keyword, Whitespace, using(this)), 'slashstartsregex'),
            # Match stuff like: function() {...}
            (r'([a-zA-Z_?.$][\w?.$]*)(?=\(\) \{)', Name.Other, 'slashstartsregex'),

            (JS_IDENT, Name.Other),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'`', String.Backtick, 'interp'),
        ],
        # Inside a template literal (backtick string).
        'interp': [
            (r'`', String.Backtick, '#pop'),
            (r'\\.', String.Backtick),
            (r'\$\{', String.Interpol, 'interp-inside'),
            (r'\$', String.Backtick),
            (r'[^`\\$]+', String.Backtick),
        ],
        # Inside a ${...} interpolation of a template literal.
        'interp-inside': [
            # TODO: should this include single-line comments and allow nesting strings?
            (r'\}', String.Interpol, '#pop'),
            include('root'),
        ],
    }
+
+
class TypeScriptLexer(JavascriptLexer):
    """
    For TypeScript source code.

    Extends the JavaScript lexer with TypeScript-only keywords, primitive
    type names, type annotations and decorators; everything else falls
    through to the inherited JavaScript rules.

    .. versionadded:: 1.6
    """

    name = 'TypeScript'
    url = 'https://www.typescriptlang.org/'
    aliases = ['typescript', 'ts']
    filenames = ['*.ts']
    mimetypes = ['application/x-typescript', 'text/x-typescript']

    # Higher priority than the TypoScriptLexer, as TypeScript is far more
    # common these days
    priority = 0.5

    tokens = {
        'root': [
            # Member/modifier keywords; a "/" after them may start a regex.
            (words(('abstract', 'implements', 'private', 'protected',
                    'public', 'readonly'), suffix=r'\b'),
             Keyword, 'slashstartsregex'),
            (words(('enum', 'interface', 'override'), suffix=r'\b'),
             Keyword.Declaration, 'slashstartsregex'),
            (words(('declare', 'type'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Primitive type names used in annotations.
            (words(('string', 'boolean', 'number'),
                   prefix=r'\b', suffix=r'\b'), Keyword.Type),
            # Match stuff like: module name {...}
            (r'\b(module)(\s*)([\w?.$]+)(\s*)',
             bygroups(Keyword.Reserved, Whitespace, Name.Other, Whitespace),
             'slashstartsregex'),
            # Match stuff like: (function: return type)
            (r'([\w?.$]+)(\s*)(:)(\s*)([\w?.$]+)',
             bygroups(Name.Other, Whitespace, Operator, Whitespace,
                      Keyword.Type)),
            # Match stuff like: Decorators
            (r'@' + JS_IDENT, Keyword.Declaration),
            # Delegate everything else to the JavaScript rules.
            inherit,
        ],
    }
+
+
class KalLexer(RegexLexer):
    """
    For Kal source code.

    .. versionadded:: 2.0
    """

    name = 'Kal'
    url = 'http://rzimmerman.github.io/kal'
    aliases = ['kal']
    filenames = ['*.kal']
    mimetypes = ['text/kal', 'application/kal']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Whitespace),
            (r'###[^#].*?###', Comment.Multiline),
            (r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)),
        ],
        # After "function"/"method"/"task": the next identifier is the name.
        'functiondef': [
            (r'([$a-zA-Z_][\w$]*)(\s*)', bygroups(Name.Function, Whitespace),
             '#pop'),
            include('commentsandwhitespace'),
        ],
        # After "class": class name, optionally "inherits from Parent".
        'classdef': [
            (r'\b(inherits)(\s+)(from)\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'([$a-zA-Z_][\w$]*)(?=\s*\n)', Name.Class, '#pop'),
            (r'[$a-zA-Z_][\w$]*\b', Name.Class),
            include('commentsandwhitespace'),
        ],
        # Inside [...]: "property"/"value" are keywords of comprehensions.
        'listcomprehension': [
            (r'\]', Punctuation, '#pop'),
            (r'\b(property|value)\b', Keyword),
            include('root'),
        ],
        # Rest of a "wait for" line: "from" is a keyword until newline.
        'waitfor': [
            (r'\n', Whitespace, '#pop'),
            (r'\bfrom\b', Keyword),
            include('root'),
        ],
        'root': [
            include('commentsandwhitespace'),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gimuysd]+\b|\B)', String.Regex),
            (r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
             Operator),
            (r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
             r'doesnt\s+exist)\b', Operator.Word),
            # Anonymous function shorthand: optional (args) followed by ">".
            (r'(\([^()]+\))?(\s*)(>)',
             bygroups(Name.Function, Whitespace, Punctuation)),
            (r'[{(]', Punctuation),
            (r'\[', Punctuation, 'listcomprehension'),
            (r'[})\].,]', Punctuation),
            (r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
            (r'\bclass\b', Keyword.Declaration, 'classdef'),
            (r'\b(safe(?=\s))?(\s*)(wait(?=\s))(\s+)(for)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace,
                      Keyword), 'waitfor'),
            (r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
            (r'(?<![.$])(run)(\s+)(in)(\s+)(parallel)\b',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(?<![.$])(for)(\s+)(parallel|series)?\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(?<![.$])(except)(\s+)(when)?\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(?<![.$])(fail)(\s+)(with)?\b',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(?<![.$])(inherits)(\s+)(from)?\b',
             bygroups(Keyword, Whitespace, Keyword)),
            # NOTE: a second, identical "(for)(\s+)(parallel|series)?" rule
            # used to follow here; it was unreachable (the earlier copy
            # always matched first) and has been removed.
            (words((
                'in', 'of', 'while', 'until', 'break', 'return', 'continue',
                'when', 'if', 'unless', 'else', 'otherwise', 'throw', 'raise',
                'try', 'catch', 'finally', 'new', 'delete', 'typeof',
                'instanceof', 'super'), prefix=r'(?<![.$])', suffix=r'\b'),
             Keyword),
            (words((
                'true', 'false', 'yes', 'no', 'on', 'off', 'null', 'nothing',
                'none', 'NaN', 'Infinity', 'undefined'), prefix=r'(?<![.$])',
                suffix=r'\b'), Keyword.Constant),
            (words((
                'Array', 'Boolean', 'Date', 'Error', 'Function', 'Math',
                'Number', 'Object', 'RegExp', 'String', 'decodeURI',
                'decodeURIComponent', 'encodeURI', 'encodeURIComponent', 'eval',
                'isFinite', 'isNaN', 'isSafeInteger', 'parseFloat', 'parseInt',
                'document', 'window', 'globalThis', 'Symbol', 'print'),
                suffix=r'\b'), Name.Builtin),
            (r'([$a-zA-Z_][\w.$]*)(\s*)(:|[+\-*/]?\=)?\b',
             bygroups(Name.Variable, Whitespace, Operator)),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all kal strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted string don't need ' escapes
            (r'#\{', String.Interpol, "interpoling_string"),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-string
            (r'#\{', String.Interpol, "interpoling_string"),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
+
+
class LiveScriptLexer(RegexLexer):
    """
    For LiveScript source code.

    .. versionadded:: 1.6
    """

    name = 'LiveScript'
    url = 'https://livescript.net/'
    aliases = ['livescript', 'live-script']
    filenames = ['*.ls']
    mimetypes = ['text/livescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Whitespace),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(#.*?)(\n)', bygroups(Comment.Single, Whitespace)),
        ],
        # Inside a heregex: // ... // spanning multiple lines.
        'multilineregex': [
            include('commentsandwhitespace'),
            (r'//([gimuysd]+\b|\B)', String.Regex, '#pop'),
            (r'/', String.Regex),
            (r'[^/#]+', String.Regex)
        ],
        # A "/" here starts a regex literal rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'//', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
            (r'/', Operator, '#pop'),
            default('#pop'),
        ],
        'root': [
            (r'\A(?=\s|/)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Arrow function definitions: (args) -> / ~> and backcalls <- / <~.
            (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
             r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
            (r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
             r'[+*`%&|^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![.$])(for|own|in|of|while|until|loop|break|'
             r'return|continue|switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by|const|var|to|til)\b', Keyword,
             'slashstartsregex'),
            (r'(?<![.$])(true|false|yes|no|on|off|'
             r'null|NaN|Infinity|undefined|void)\b',
             Keyword.Constant),
            # NOTE: "Symbol" used to appear twice in this alternation;
            # the redundant branch has been removed (no behavioral change,
            # the first branch always matched).
            (r'(Array|Boolean|Date|Error|Function|Math|'
             r'Number|Object|RegExp|String|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
             r'globalThis|Symbol|BigInt)\b', Name.Builtin),
            (r'([$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)',
             bygroups(Name.Variable, Whitespace, Operator, Whitespace),
             'slashstartsregex'),
            (r'(@[$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)',
             bygroups(Name.Variable.Instance, Whitespace, Operator,
                      Whitespace),
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'),
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
            (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
            (r'\\\S+', String),
            (r'<\[.*?\]>', String),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all LiveScript strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted string don't need ' escapes
            (r'#\{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-string
            (r'#\{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
+
+
class DartLexer(RegexLexer):
    """
    For Dart source code.

    .. versionadded:: 1.5
    """

    name = 'Dart'
    url = 'http://dart.dev/'
    aliases = ['dart']
    filenames = ['*.dart']
    mimetypes = ['text/x-dart']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # Strings first, so quoted text never hits the keyword rules.
            include('string_literal'),
            (r'#!(.*?)$', Comment.Preproc),
            (r'\b(import|export)\b', Keyword, 'import_decl'),
            (r'\b(library|source|part of|part)\b', Keyword),
            (r'[^\S\n]+', Whitespace),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'\b(class|extension|mixin)\b(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'\b(as|assert|break|case|catch|const|continue|default|do|else|finally|'
             r'for|if|in|is|new|rethrow|return|super|switch|this|throw|try|while)\b',
             Keyword),
            (r'\b(abstract|async|await|const|covariant|extends|external|factory|final|'
             r'get|implements|late|native|on|operator|required|set|static|sync|typedef|'
             r'var|with|yield)\b', Keyword.Declaration),
            (r'\b(bool|double|dynamic|int|num|Function|Never|Null|Object|String|void)\b',
             Keyword.Type),
            (r'\b(false|null|true)\b', Keyword.Constant),
            (r'[~!%^&*+=|?:<>/-]|as\b', Operator),
            (r'@[a-zA-Z_$]\w*', Name.Decorator),
            (r'[a-zA-Z_$]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[(){}\[\],.;]', Punctuation),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # DIGIT+ (‘.’ DIGIT*)? EXPONENT?
            (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
            (r'\.\d+([eE][+-]?\d+)?', Number),  # ‘.’ DIGIT+ EXPONENT?
            (r'\n', Whitespace)
            # pseudo-keyword negate intentionally left out
        ],
        # After "class"/"extension"/"mixin": a single class name.
        'class': [
            (r'[a-zA-Z_$]\w*', Name.Class, '#pop')
        ],
        # Body of an import/export directive, up to the terminating ";".
        'import_decl': [
            include('string_literal'),
            (r'\s+', Whitespace),
            (r'\b(as|deferred|show|hide)\b', Keyword),
            (r'[a-zA-Z_$]\w*', Name),
            (r'\,', Punctuation),
            (r'\;', Punctuation, '#pop')
        ],
        # Dispatch on all Dart string forms: raw (r-prefixed, no escapes or
        # interpolation) and normal, each in single/double/triple quotes.
        'string_literal': [
            # Raw strings.
            (r'r"""([\w\W]*?)"""', String.Double),
            (r"r'''([\w\W]*?)'''", String.Single),
            (r'r"(.*?)"', String.Double),
            (r"r'(.*?)'", String.Single),
            # Normal Strings.
            (r'"""', String.Double, 'string_double_multiline'),
            (r"'''", String.Single, 'string_single_multiline'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single')
        ],
        # Escapes and $identifier / ${expression} interpolation, shared by
        # all non-raw string states below.
        'string_common': [
            (r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])",
             String.Escape),
            (r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol))
        ],
        'string_double': [
            (r'"', String.Double, '#pop'),
            (r'[^"$\\\n]+', String.Double),
            include('string_common'),
            (r'\$+', String.Double)
        ],
        'string_double_multiline': [
            (r'"""', String.Double, '#pop'),
            (r'[^"$\\]+', String.Double),
            include('string_common'),
            (r'(\$|\")+', String.Double)
        ],
        'string_single': [
            (r"'", String.Single, '#pop'),
            (r"[^'$\\\n]+", String.Single),
            include('string_common'),
            (r'\$+', String.Single)
        ],
        'string_single_multiline': [
            (r"'''", String.Single, '#pop'),
            (r'[^\'$\\]+', String.Single),
            include('string_common'),
            (r'(\$|\')+', String.Single)
        ]
    }
+
+
class LassoLexer(RegexLexer):
    """
    For Lasso source code, covering both Lasso 9
    syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
    HTML, use the `LassoHtmlLexer`.

    Additional options accepted:

    `builtinshighlighting`
        If given and ``True``, highlight builtin types, traits, methods, and
        members (default: ``True``).
    `requiredelimiters`
        If given and ``True``, only highlight code between delimiters as Lasso
        (default: ``False``).

    .. versionadded:: 1.6
    """

    name = 'Lasso'
    aliases = ['lasso', 'lassoscript']
    filenames = ['*.lasso', '*.lasso[89]']
    alias_filenames = ['*.incl', '*.inc', '*.las']
    mimetypes = ['text/x-lasso']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            # A lasso9 shebang means the entire file is Lasso code.
            (r'^#![ \S]+lasso9\b', Comment.Preproc, 'lasso'),
            # "[" or "<" ahead: delimited (embedded-in-markup) mode.
            (r'(?=\[|<)', Other, 'delimiters'),
            (r'\s+', Whitespace),
            # Otherwise treat the whole file as a plain Lasso source file.
            default(('delimiters', 'lassofile')),
        ],
        # Non-Lasso content interleaved with [...] and <?lasso ... ?> blocks.
        'delimiters': [
            (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Comment.Preproc, 'squarebrackets'),
            (r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'),
            (r'<(!--.*?-->)?', Other),
            (r'[^[<]+', Other),
        ],
        # After [no_square_brackets]: "[" is ordinary text; only angle-
        # bracket delimiters still start Lasso code.
        'nosquarebrackets': [
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Other),
            (r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'),
            (r'<(!--.*?-->)?', Other),
            (r'[^[<]+', Other),
        ],
        # Everything up to [/noprocess] is passed through unprocessed.
        'noprocess': [
            (r'\[/noprocess\]', Comment.Preproc, '#pop'),
            (r'\[', Other),
            (r'[^[]', Other),
        ],
        'squarebrackets': [
            (r'\]', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'anglebrackets': [
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        # Whole-file mode; stray closing delimiters just end the state.
        'lassofile': [
            (r'\]|\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'whitespacecomments': [
            (r'\s+', Whitespace),
            (r'(//.*?)(\s*)$', bygroups(Comment.Single, Whitespace)),
            (r'/\*\*!.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
        ],
        # The actual Lasso language rules.
        'lasso': [
            # whitespace/comments
            include('whitespacecomments'),

            # literals
            (r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
            (r'0x[\da-f]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'(infinity|NaN)\b', Number),
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),
            (r'`[^`]*`', String.Backtick),

            # names
            (r'\$[a-z_][\w.]*', Name.Variable),
            (r'#([a-z_][\w.]*|\d+\b)', Name.Variable.Instance),
            (r"(\.)(\s*)('[a-z_][\w.]*')",
             bygroups(Name.Builtin.Pseudo, Whitespace, Name.Variable.Class)),
            (r"(self)(\s*)(->)(\s*)('[a-z_][\w.]*')",
             bygroups(Name.Builtin.Pseudo, Whitespace, Operator, Whitespace,
                      Name.Variable.Class)),
            (r'(\.\.?)(\s*)([a-z_][\w.]*(=(?!=))?)',
             bygroups(Name.Builtin.Pseudo, Whitespace, Name.Other.Member)),
            (r'(->\\?|&)(\s*)([a-z_][\w.]*(=(?!=))?)',
             bygroups(Operator, Whitespace, Name.Other.Member)),
            (r'(?<!->)(self|inherited|currentcapture|givenblock)\b',
             Name.Builtin.Pseudo),
            (r'-(?!infinity)[a-z_][\w.]*', Name.Attribute),
            (r'(::)(\s*)([a-z_][\w.]*)',
             bygroups(Punctuation, Whitespace, Name.Label)),
            (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
             r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
             r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
             r'Error_InvalidDatabase|Error_InvalidPassword|'
             r'Error_InvalidUsername|Error_ModuleNotFound|'
             r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
             r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
             r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
             r'Error_UpdateError)\b', Name.Exception),

            # definitions
            (r'(define)(\s+)([a-z_][\w.]*)(\s*)(=>)(\s*)(type|trait|thread)\b',
             bygroups(Keyword.Declaration, Whitespace, Name.Class,
                      Whitespace, Operator, Whitespace, Keyword)),
            (r'(define)(\s+)([a-z_][\w.]*)(\s*)(->)(\s*)([a-z_][\w.]*=?|[-+*/%])',
             bygroups(Keyword.Declaration, Whitespace, Name.Class,
                      Whitespace, Operator, Whitespace, Name.Function),
             'signature'),
            (r'(define)(\s+)([a-z_][\w.]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Function), 'signature'),
            (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])'
             r'(?=\s*\())', bygroups(Keyword, Whitespace, Name.Function),
             'signature'),
            (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)',
             bygroups(Keyword, Whitespace, Name.Function)),

            # keywords
            (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant),
            (r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration),
            (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
             r'null|boolean|bytes|keyword|list|locale|queue|set|stack|'
             r'staticarray)\b', Keyword.Type),
            (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Whitespace, Keyword)),
            (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Whitespace, Name)),
            (r'require\b', Keyword, 'requiresection'),
            (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
            # Lasso 8 tags and 9 keywords, optionally preceded by "/" as a
            # closing-tag marker.
            (r'(/?)(Cache|Database_Names|Database_SchemaNames|'
             r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
             r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
             r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
             r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|Link_FirstRecord|'
             r'Link_LastGroup|Link_LastRecord|Link_NextGroup|Link_NextRecord|'
             r'Link_PrevGroup|Link_PrevRecord|Log|Loop|Output_None|Portal|'
             r'Private|Protect|Records|Referer|Referrer|Repeating|ResultSet|'
             r'Rows|Search_Args|Search_Arguments|Select|Sort_Args|'
             r'Sort_Arguments|Thread_Atomic|Value_List|While|Abort|Case|Else|'
             r'Fail_If|Fail_IfNot|Fail|If_Empty|If_False|If_Null|If_True|'
             r'Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|Return|'
             r'Return_Value|Run_Children|SOAP_DefineTag|SOAP_LastRequest|'
             r'SOAP_LastResponse|Tag_Name|ascending|average|by|define|'
             r'descending|do|equals|frozen|group|handle_failure|import|in|into|'
             r'join|let|match|max|min|on|order|parent|protected|provide|public|'
             r'require|returnhome|skip|split_thread|sum|take|thread|to|trait|'
             r'type|where|with|yield|yieldhome)\b',
             bygroups(Punctuation, Keyword)),

            # other
            (r',', Punctuation, 'commamember'),
            (r'(and|or|not)\b', Operator.Word),
            (r'([a-z_][\w.]*)(\s*)(::)(\s*)([a-z_][\w.]*)?(\s*=(?!=))',
             bygroups(Name, Whitespace, Punctuation, Whitespace, Name.Label,
                      Operator)),
            (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
            (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b',
             bygroups(Operator, Operator.Word)),
            (r':=|[-+*/%=<>&|!?\\]+', Operator),
            (r'[{}():;,@^]', Punctuation),
        ],
        'singlestring': [
            (r"'", String.Single, '#pop'),
            (r"[^'\\]+", String.Single),
            include('escape'),
            (r"\\", String.Single),
        ],
        'doublestring': [
            (r'"', String.Double, '#pop'),
            (r'[^"\\]+', String.Double),
            include('escape'),
            (r'\\', String.Double),
        ],
        'escape': [
            (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:\n\r]+:|'
             r'[abefnrtv?"\'\\]|$)', String.Escape),
        ],
        # Method signature: parameters until "=>" or the closing ")".
        'signature': [
            (r'=>', Operator, '#pop'),
            (r'\)', Punctuation, '#pop'),
            (r'[(,]', Punctuation, 'parameter'),
            include('lasso'),
        ],
        'parameter': [
            (r'\)', Punctuation, '#pop'),
            (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
            (r'\.\.\.', Name.Builtin.Pseudo),
            include('lasso'),
        ],
        # Names listed after "require", possibly with signatures/type tags.
        'requiresection': [
            (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'),
            (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name),
            (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'),
            (r'(::)(\s*)([a-z_][\w.]*)',
             bygroups(Punctuation, Whitespace, Name.Label)),
            (r',', Punctuation),
            include('whitespacecomments'),
        ],
        'requiresignature': [
            (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
            (r'\)', Punctuation, '#pop:2'),
            (r'-?[a-z_][\w.]*', Name.Attribute),
            (r'(::)(\s*)([a-z_][\w.]*)',
             bygroups(Punctuation, Whitespace, Name.Label)),
            (r'\.\.\.', Name.Builtin.Pseudo),
            (r'[(,]', Punctuation),
            include('whitespacecomments'),
        ],
        # After a comma: a member name is only a function if a signature
        # ending in "=>" follows; otherwise fall straight back out.
        'commamember': [
            (r'(([a-z_][\w.]*=?|[-+*/%])'
             r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
             Name.Function, 'signature'),
            include('whitespacecomments'),
            default('#pop'),
        ],
    }

    def __init__(self, **options):
        """Read the lexer options and preload the builtin/member name sets
        used by :meth:`get_tokens_unprocessed` for re-tagging."""
        self.builtinshighlighting = get_bool_opt(
            options, 'builtinshighlighting', True)
        self.requiredelimiters = get_bool_opt(
            options, 'requiredelimiters', False)

        self._builtins = set()
        self._members = set()
        if self.builtinshighlighting:
            from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
            for key, value in BUILTINS.items():
                self._builtins.update(value)
            for key, value in MEMBERS.items():
                self._members.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the regex tokens, re-tagging known builtin and
        member names (case-insensitively) as ``Name.Builtin``."""
        stack = ['root']
        if self.requiredelimiters:
            stack.append('delimiters')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            # Member names may carry a trailing "=" (setter form); strip it
            # before looking them up.
            if (token is Name.Other and value.lower() in self._builtins or
                    token is Name.Other.Member and
                    value.lower().rstrip('=') in self._members):
                yield index, Name.Builtin, value
                continue
            yield index, token, value

    def analyse_text(text):
        # Heuristic score: a lasso9 shebang is nearly conclusive; the
        # <?lasso delimiter and local(...) declarations are weaker hints.
        rv = 0.0
        if 'bin/lasso9' in text:
            rv += 0.8
        if re.search(r'<\?lasso', text, re.I):
            rv += 0.4
        if re.search(r'local\(', text, re.I):
            rv += 0.4
        return rv
+
+
+class ObjectiveJLexer(RegexLexer):
+ """
+ For Objective-J source code with preprocessor directives.
+
+ .. versionadded:: 1.3
+ """
+
+ name = 'Objective-J'
+ aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
+ filenames = ['*.j']
+ mimetypes = ['text/x-objective-j']
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//[^\n]*\n|/[*](?:[^*]|[*][^/])*[*]/)*'
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+
+ # function definition
+ (r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)',
+ bygroups(using(this), using(this, state='function_signature'),
+ using(this))),
+
+ # class definition
+ (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Whitespace),
+ 'classname'),
+ (r'(@class|@protocol)(\s*)', bygroups(Keyword, Whitespace),
+ 'forward_classname'),
+ (r'(\s*)(@end)(\s*)', bygroups(Whitespace, Keyword, Whitespace)),
+
+ include('statements'),
+ ('[{()}]', Punctuation),
+ (';', Punctuation),
+ ],
+ 'whitespace': [
+ (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
+ bygroups(Comment.Preproc, Whitespace, String.Double)),
+ (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
+ bygroups(Comment.Preproc, Whitespace, String.Double)),
+ (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
+ bygroups(Comment.Preproc, Whitespace, String.Double)),
+ (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
+ bygroups(Comment.Preproc, Whitespace, String.Double)),
+
+ (r'#if\s+0', Comment.Preproc, 'if0'),
+ (r'#', Comment.Preproc, 'macro'),
+
+ (r'\s+', Whitespace),
+ (r'(\\)(\n)',
+ bygroups(String.Escape, Whitespace)), # line continuation
+ (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+ (r'<!--', Comment),
+ ],
+ 'slashstartsregex': [
+ include('whitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex, '#pop'),
+ (r'(?=/)', Text, ('#pop', 'badregex')),
+ default('#pop'),
+ ],
+ 'badregex': [
+ (r'\n', Whitespace, '#pop'),
+ ],
+ 'statements': [
+ (r'(L|@)?"', String, 'string'),
+ (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
+ String.Char),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
+ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+ (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
+ (r'0[0-7]+[Ll]?', Number.Oct),
+ (r'\d+[Ll]?', Number.Integer),
+
+ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+
+ (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?',
+ Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+
+ (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
+ r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+ r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
+
+ (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
+
+ (r'(@selector|@private|@protected|@public|@encode|'
+ r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
+ r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
+
+ (r'(int|long|float|short|double|char|unsigned|signed|void|'
+ r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
+ Keyword.Type),
+
+ (r'(self|super)\b', Name.Builtin),
+
+ (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
+ (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
+ (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
+ r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
+ r'SQRT2)\b', Keyword.Constant),
+
+ (r'(Array|Boolean|Date|Error|Function|Math|'
+ r'Number|Object|RegExp|String|decodeURI|'
+ r'decodeURIComponent|encodeURI|encodeURIComponent|'
+ r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
+ r'window|globalThis|Symbol)\b', Name.Builtin),
+
+ (r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()',
+ bygroups(Name.Function, using(this))),
+
+ (r'[$a-zA-Z_]\w*', Name),
+ ],
+ 'classname': [
+ # interface definition that inherits
+ (r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws +
+ r')([a-zA-Z_]\w*)?',
+ bygroups(Name.Class, using(this), Name.Class), '#pop'),
+ # interface definition for a category
+ (r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))',
+ bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
+ # simple interface / implementation
+ (r'([a-zA-Z_]\w*)', Name.Class, '#pop'),
+ ],
+ 'forward_classname': [
+ (r'([a-zA-Z_]\w*)(\s*)(,)(\s*)',
+ bygroups(Name.Class, Whitespace, Text, Whitespace), '#push'),
+ (r'([a-zA-Z_]\w*)(\s*)(;?)',
+ bygroups(Name.Class, Whitespace, Text), '#pop'),
+ ],
+ 'function_signature': [
+ include('whitespace'),
+
+ # start of a selector w/ parameters
+ (r'(\(' + _ws + r')' # open paren
+ r'([a-zA-Z_]\w+)' # return type
+ r'(' + _ws + r'\)' + _ws + r')' # close paren
+ r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
+ bygroups(using(this), Keyword.Type, using(this),
+ Name.Function), 'function_parameters'),
+
+ # no-param function
+ (r'(\(' + _ws + r')' # open paren
+ r'([a-zA-Z_]\w+)' # return type
+ r'(' + _ws + r'\)' + _ws + r')' # close paren
+ r'([$a-zA-Z_]\w+)', # function name
+ bygroups(using(this), Keyword.Type, using(this),
+ Name.Function), "#pop"),
+
+ # no return type given, start of a selector w/ parameters
+ (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
+ bygroups(Name.Function), 'function_parameters'),
+
+ # no return type given, no-param function
+ (r'([$a-zA-Z_]\w+)', # function name
+ bygroups(Name.Function), "#pop"),
+
+ default('#pop'),
+ ],
+ 'function_parameters': [
+ include('whitespace'),
+
+ # parameters
+ (r'(\(' + _ws + ')' # open paren
+ r'([^)]+)' # type
+ r'(' + _ws + r'\)' + _ws + r')' # close paren
+ r'([$a-zA-Z_]\w+)', # param name
+ bygroups(using(this), Keyword.Type, using(this), Text)),
+
+ # one piece of a selector name
+ (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
+ Name.Function),
+
+ # smallest possible selector piece
+ (r'(:)', Name.Function),
+
+ # var args
+ (r'(,' + _ws + r'\.\.\.)', using(this)),
+
+ # param name
+ (r'([$a-zA-Z_]\w+)', Text),
+ ],
+ 'expression': [
+ (r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function,
+ Punctuation)),
+ (r'(\))', Punctuation, "#pop"),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ 'macro': [
+ (r'[^/\n]+', Comment.Preproc),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace), '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Whitespace),
+ (r'\n', Whitespace, '#pop'),
+ ],
+ 'if0': [
+ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
+ (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+ (r'(.*?)(\n)', bygroups(Comment, Whitespace)),
+ ]
+ }
+
+ def analyse_text(text):  # content-based guessing hook; Pygments calls it statically (no self), return value is a confidence score
+ if re.search(r'^\s*@import\s+[<"]', text, re.MULTILINE):
+ # special directive found in most Objective-J files
+ return True
+ return False
+
+
+class CoffeeScriptLexer(RegexLexer):
+ """
+ For CoffeeScript source code.
+
+ .. versionadded:: 1.3
+ """
+
+ name = 'CoffeeScript'
+ url = 'http://coffeescript.org'
+ aliases = ['coffeescript', 'coffee-script', 'coffee']
+ filenames = ['*.coffee']
+ mimetypes = ['text/coffeescript']
+
+ _operator_re = (  # operator rules enter 'slashstartsregex' so a following '/' is read as a regex literal, not division
+ r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
+ r'\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|\^/])=?')
+
+ flags = re.DOTALL  # '.' spans newlines; CoffeeScript strings are multi-line
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Whitespace),
+ (r'###[^#].*?###', Comment.Multiline),  # block comment; [^#] keeps '####...' divider lines out
+ (r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ ],
+ 'multilineregex': [
+ (r'[^/#]+', String.Regex),
+ (r'///([gimuysd]+\b|\B)', String.Regex, '#pop'),  # closing /// plus optional flags
+ (r'#\{', String.Interpol, 'interpoling_string'),  # heregexes support #{...} interpolation
+ (r'[/#]', String.Regex),
+ ],
+ 'slashstartsregex': [
+ include('commentsandwhitespace'),
+ (r'///', String.Regex, ('#pop', 'multilineregex')),  # extended ("heregex") form, tried before the plain /.../ rule
+ (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
+ # This isn't really guarding against mishighlighting well-formed
+ # code, just the ability to infinite-loop between root and
+ # slashstartsregex.
+ (r'/', Operator, '#pop'),
+ default('#pop'),
+ ],
+ 'root': [
+ include('commentsandwhitespace'),
+ (r'\A(?=\s|/)', Text, 'slashstartsregex'),  # at the very start of input a '/' must be a regex
+ (_operator_re, Operator, 'slashstartsregex'),
+ (r'(?:\([^()]*\))?\s*[=-]>', Name.Function, 'slashstartsregex'),  # (args) -> and => arrow functions
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+ (r'(?<![.$])(for|own|in|of|while|until|'
+ r'loop|break|return|continue|'
+ r'switch|when|then|if|unless|else|'
+ r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
+ r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
+ (r'(?<![.$])(true|false|yes|no|on|off|null|'
+ r'NaN|Infinity|undefined)\b',
+ Keyword.Constant),
+ (r'(Array|Boolean|Date|Error|Function|Math|'
+ r'Number|Object|RegExp|String|decodeURI|'
+ r'decodeURIComponent|encodeURI|encodeURIComponent|'
+ r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|globalThis|Symbol)\b',
+ Name.Builtin),
+ (r'([$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)',
+ bygroups(Name.Variable, Whitespace, Operator, Whitespace),
+ 'slashstartsregex'),
+ (r'(@[$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)',
+ bygroups(Name.Variable.Instance, Whitespace, Operator, Whitespace),
+ 'slashstartsregex'),
+ (r'@', Name.Other, 'slashstartsregex'),
+ (r'@?[$a-zA-Z_][\w$]*', Name.Other),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ ('"""', String, 'tdqs'),  # triple-quoted forms must be tried before the single-quote rules below
+ ("'''", String, 'tsqs'),
+ ('"', String, 'dqs'),
+ ("'", String, 'sqs'),
+ ],
+ 'strings': [
+ (r'[^#\\\'"]+', String),
+ # note that all coffee script strings are multi-line.
+ # hashmarks, quotes and backslashes must be parsed one at a time
+ ],
+ 'interpoling_string': [
+ (r'\}', String.Interpol, "#pop"),
+ include('root')  # interpolations contain arbitrary expressions
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ (r'\\.|\'', String), # double-quoted string don't need ' escapes
+ (r'#\{', String.Interpol, "interpoling_string"),
+ (r'#', String),
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ (r'#|\\.|"', String), # single quoted strings don't need " escapses
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ (r'\\.|\'|"', String), # no need to escape quotes in triple-string
+ (r'#\{', String.Interpol, "interpoling_string"),  # only double-quoted forms interpolate
+ (r'#', String),
+ include('strings'),
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
+ include('strings')
+ ],
+ }
+
+
+class MaskLexer(RegexLexer):
+ """
+ For Mask markup.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Mask'
+ url = 'https://github.com/atmajs/MaskJS'
+ aliases = ['mask']
+ filenames = ['*.mask']
+ mimetypes = ['text/x-mask']
+
+ flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'[{};>]', Punctuation),
+ (r"'''", String, 'string-trpl-single'),
+ (r'"""', String, 'string-trpl-double'),
+ (r"'", String, 'string-single'),
+ (r'"', String, 'string-double'),
+ (r'([\w-]+)', Name.Tag, 'node'),  # a tag name opens a 'node' context for classes/ids/attributes
+ (r'([^.#;{>\s]+)', Name.Class, 'node'),
+ (r'(#[\w-]+)', Name.Function, 'node'),  # '#id' shorthand
+ (r'(\.[\w-]+)', Name.Variable.Class, 'node')  # '.class' shorthand
+ ],
+ 'string-base': [
+ (r'\\.', String.Escape),
+ (r'~\[', String.Interpol, 'interpolation'),  # ~[...] is Mask's interpolation syntax
+ (r'.', String.Single),
+ ],
+ 'string-single': [
+ (r"'", String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-double': [
+ (r'"', String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-trpl-single': [
+ (r"'''", String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-trpl-double': [
+ (r'"""', String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'interpolation': [
+ (r'\]', String.Interpol, '#pop'),
+ (r'(\s*)(:)', bygroups(Whitespace, String.Interpol), 'expression'),  # ':' introduces a JS expression
+ (r'(\s*)(\w+)(:)', bygroups(Whitespace, Name.Other, Punctuation)),
+ (r'[^\]]+', String.Interpol)
+ ],
+ 'expression': [
+ (r'[^\]]+', using(JavascriptLexer), '#pop')  # delegate the expression body to the JS lexer
+ ],
+ 'node': [
+ (r'\s+', Whitespace),
+ (r'\.', Name.Variable.Class, 'node-class'),
+ (r'\#', Name.Function, 'node-id'),
+ (r'(style)([ \t]*)(=)',
+ bygroups(Name.Attribute, Whitespace, Operator),
+ 'node-attr-style-value'),  # style="" gets CSS-aware highlighting
+ (r'([\w:-]+)([ \t]*)(=)',
+ bygroups(Name.Attribute, Whitespace, Operator),
+ 'node-attr-value'),
+ (r'[\w:-]+', Name.Attribute),
+ (r'[>{;]', Punctuation, '#pop')  # end of the node header
+ ],
+ 'node-class': [
+ (r'[\w-]+', Name.Variable.Class),
+ (r'~\[', String.Interpol, 'interpolation'),
+ default('#pop')
+ ],
+ 'node-id': [
+ (r'[\w-]+', Name.Function),
+ (r'~\[', String.Interpol, 'interpolation'),
+ default('#pop')
+ ],
+ 'node-attr-value': [
+ (r'\s+', Whitespace),
+ (r'\w+', Name.Variable, '#pop'),
+ (r"'", String, 'string-single-pop2'),
+ (r'"', String, 'string-double-pop2'),
+ default('#pop')
+ ],
+ 'node-attr-style-value': [
+ (r'\s+', Whitespace),
+ (r"'", String.Single, 'css-single-end'),
+ (r'"', String.Single, 'css-double-end'),
+ include('node-attr-value')
+ ],
+ 'css-base': [
+ (r'\s+', Whitespace),
+ (r";", Punctuation),
+ (r"[\w\-]+\s*:", Name.Builtin)  # CSS property name
+ ],
+ 'css-single-end': [
+ include('css-base'),
+ (r"'", String.Single, '#pop:2'),  # pop both this state and the attr-value state
+ (r"[^;']+", Name.Entity)
+ ],
+ 'css-double-end': [
+ include('css-base'),
+ (r'"', String.Single, '#pop:2'),
+ (r'[^;"]+', Name.Entity)
+ ],
+ 'string-single-pop2': [
+ (r"'", String.Single, '#pop:2'),  # closing quote leaves the string and 'node-attr-value'
+ include('string-base')
+ ],
+ 'string-double-pop2': [
+ (r'"', String.Single, '#pop:2'),
+ include('string-base')
+ ],
+ }
+
+
+class EarlGreyLexer(RegexLexer):
+ """
+ For Earl-Grey source code.
+
+ .. versionadded: 2.1
+ """
+
+ name = 'Earl Grey'
+ aliases = ['earl-grey', 'earlgrey', 'eg']
+ filenames = ['*.eg']
+ mimetypes = ['text/x-earl-grey']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ include('control'),
+ (r'[^\S\n]+', Text),
+ (r'(;;.*)(\n)', bygroups(Comment, Whitespace)),  # ';;' starts a line comment
+ (r'[\[\]{}:(),;]', Punctuation),
+ (r'(\\)(\n)', bygroups(String.Escape, Whitespace)),  # line continuation
+ (r'\\', Text),
+ include('errors'),
+ (words((
+ 'with', 'where', 'when', 'and', 'not', 'or', 'in',
+ 'as', 'of', 'is'),
+ prefix=r'(?<=\s|\[)', suffix=r'(?![\w$\-])'),
+ Operator.Word),
+ (r'[*@]?->', Name.Function),
+ (r'[+\-*/~^<>%&|?!@#.]*=', Operator.Word),  # compound assignment operators
+ (r'\.{2,3}', Operator.Word), # Range Operator
+ (r'([+*/~^<>&|?!]+)|([#\-](?=\s))|@@+(?=\s)|=+', Operator),
+ (r'(?<![\w$\-])(var|let)(?:[^\w$])', Keyword.Declaration),
+ include('keywords'),
+ include('builtins'),
+ include('assignment'),
+ (r'''(?x)
+ (?:()([a-zA-Z$_](?:[\w$\-]*[\w$])?)|
+ (?<=[\s{\[(])(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?))
+ (?=.*%)''',
+ bygroups(Punctuation, Name.Tag, Punctuation, Name.Class.Start), 'dbs'),  # name followed (later on the line) by '%' enters 'dbs'
+ (r'[rR]?`', String.Backtick, 'bt'),  # NOTE(review): this single-backtick rule precedes the ``` rule below, so a ``` sequence is consumed here first and 'tbt' looks unreachable from root — confirm intended ordering
+ (r'[rR]?```', String.Backtick, 'tbt'),
+ (r'(?<=[\s\[{(,;])\.([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
+ r'(?=[\s\]}),;])', String.Symbol),
+ include('nested'),
+ (r'(?:[rR]|[rR]\.[gmi]{1,3})?"', String, combined('stringescape', 'dqs')),
+ (r'(?:[rR]|[rR]\.[gmi]{1,3})?\'', String, combined('stringescape', 'sqs')),
+ (r'"""', String, combined('stringescape', 'tdqs')),
+ include('tuple'),
+ include('import_paths'),
+ include('name'),
+ include('numbers'),
+ ],
+ 'dbs': [
+ (r'(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)(?=[.\[\s])',
+ bygroups(Punctuation, Name.Class.DBS)),
+ (r'(\[)([\^#][a-zA-Z$_](?:[\w$\-]*[\w$])?)(\])',
+ bygroups(Punctuation, Name.Entity.DBS, Punctuation)),
+ (r'\s+', Whitespace),
+ (r'%', Operator.DBS, '#pop'),  # the '%' that root looked ahead for terminates this state
+ ],
+ 'import_paths': [
+ (r'(?<=[\s:;,])(\.{1,3}(?:[\w\-]*/)*)(\w(?:[\w\-]*\w)*)(?=[\s;,])',
+ bygroups(Text.Whitespace, Text)),
+ ],
+ 'assignment': [
+ (r'(\.)?([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
+ r'(?=\s+[+\-*/~^<>%&|?!@#.]*\=\s)',
+ bygroups(Punctuation, Name.Variable))  # name on the left of an assignment operator
+ ],
+ 'errors': [
+ (words(('Error', 'TypeError', 'ReferenceError'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
+ Name.Exception),
+ (r'''(?x)
+ (?<![\w$])
+ E\.[\w$](?:[\w$\-]*[\w$])?
+ (?:\.[\w$](?:[\w$\-]*[\w$])?)*
+ (?=[({\[?!\s])''',
+ Name.Exception),  # E.some.error style exception references
+ ],
+ 'control': [
+ (r'''(?x)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?!\n)\s+
+ (?!and|as|each\*|each|in|is|mod|of|or|when|where|with)
+ (?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[\w$-]*[\w$])?)''',
+ Keyword.Control),
+ (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(?!\n)(\s+)(?=[\'"\d{\[(])',
+ bygroups(Keyword.Control, Whitespace)),
+ (r'''(?x)
+ (?:
+ (?<=[%=])|
+ (?<=[=\-]>)|
+ (?<=with|each|with)|
+ (?<=each\*|where)
+ )(\s+)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
+ bygroups(Whitespace, Keyword.Control, Punctuation)),
+ (r'''(?x)
+ (?<![+\-*/~^<>%&|?!@#.])(\s+)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
+ bygroups(Whitespace, Keyword.Control, Punctuation)),
+ ],
+ 'nested': [
+ (r'''(?x)
+ (?<=[\w$\]})])(\.)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?=\s+with(?:\s|\n))''',
+ bygroups(Punctuation, Name.Function)),
+ (r'''(?x)
+ (?<!\s)(\.)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?=[}\]).,;:\s])''',
+ bygroups(Punctuation, Name.Field)),  # attribute access
+ (r'''(?x)
+ (?<=[\w$\]})])(\.)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?=[\[{(:])''',
+ bygroups(Punctuation, Name.Function)),  # method call
+ ],
+ 'keywords': [
+ (words((
+ 'each', 'each*', 'mod', 'await', 'break', 'chain',
+ 'continue', 'elif', 'expr-value', 'if', 'match',
+ 'return', 'yield', 'pass', 'else', 'require', 'var',
+ 'let', 'async', 'method', 'gen'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
+ Keyword.Pseudo),
+ (words(('this', 'self', '@'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
+ Keyword.Constant),
+ (words((
+ 'Function', 'Object', 'Array', 'String', 'Number',
+ 'Boolean', 'ErrorFactory', 'ENode', 'Promise'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
+ Keyword.Type),
+ ],
+ 'builtins': [
+ (words((
+ 'send', 'object', 'keys', 'items', 'enumerate', 'zip',
+ 'product', 'neighbours', 'predicate', 'equal',
+ 'nequal', 'contains', 'repr', 'clone', 'range',
+ 'getChecker', 'get-checker', 'getProperty', 'get-property',
+ 'getProjector', 'get-projector', 'consume', 'take',
+ 'promisify', 'spawn', 'constructor'),
+ prefix=r'(?<![\w\-#.])', suffix=r'(?![\w\-.])'),
+ Name.Builtin),
+ (words((
+ 'true', 'false', 'null', 'undefined'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
+ Name.Constant),
+ ],
+ 'name': [
+ (r'@([a-zA-Z$_](?:[\w$-]*[\w$])?)', Name.Variable.Instance),
+ (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(\+\+|\-\-)?',
+ bygroups(Name.Symbol, Operator.Word))
+ ],
+ 'tuple': [
+ (r'#[a-zA-Z_][\w\-]*(?=[\s{(,;])', Name.Namespace)  # #tagged tuples
+ ],
+ 'interpoling_string': [
+ (r'\}', String.Interpol, '#pop'),
+ include('root')
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings': [
+ (r'[^\\\'"]', String),
+ (r'[\'"\\]', String),
+ (r'\n', String) # All strings are multiline in EG
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape),
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape),
+ (r'\{', String.Interpol, 'interpoling_string'),  # only single-quoted strings interpolate
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ include('strings'),
+ ],
+ 'bt': [
+ (r'`', String.Backtick, '#pop'),
+ (r'(?<!`)\n', String.Backtick),
+ (r'\^=?', String.Escape),
+ (r'.+', String.Backtick),
+ ],
+ 'tbt': [
+ (r'```', String.Backtick, '#pop'),
+ (r'\n', String.Backtick),
+ (r'\^=?', String.Escape),
+ (r'[^`]+', String.Backtick),
+ ],
+ 'numbers': [
+ (r'\d+\.(?!\.)\d*([eE][+-]?[0-9]+)?', Number.Float),  # (?!\.) keeps '1..2' ranges out
+ (r'\d+[eE][+-]?[0-9]+', Number.Float),
+ (r'8r[0-7]+', Number.Oct),  # Earl Grey radix-literal syntax: <base>r<digits>
+ (r'2r[01]+', Number.Bin),
+ (r'16r[a-fA-F0-9]+', Number.Hex),
+ (r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?',
+ Number.Radix),
+ (r'\d+', Number.Integer)
+ ],
+ }
+
+
+class JuttleLexer(RegexLexer):
+ """
+ For Juttle source code.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'Juttle'
+ url = 'http://juttle.github.io/'
+ aliases = ['juttle']
+ filenames = ['*.juttle']
+ mimetypes = ['application/juttle', 'application/x-juttle',
+ 'text/x-juttle', 'text/juttle']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Whitespace),
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/\*.*?\*/', Comment.Multiline)
+ ],
+ 'slashstartsregex': [
+ include('commentsandwhitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
+ (r'(?=/)', Text, ('#pop', 'badregex')),  # unparsable regex: skip to end of line via 'badregex'
+ default('#pop')
+ ],
+ 'badregex': [
+ (r'\n', Text, '#pop')
+ ],
+ 'root': [
+ (r'^(?=\s|/)', Text, 'slashstartsregex'),  # at line start a '/' is a regex, not division
+ include('commentsandwhitespace'),
+ (r':\d{2}:\d{2}:\d{2}(\.\d*)?:', String.Moment),  # String.Moment is a custom subtoken for Juttle time literals
+ (r':(now|beginning|end|forever|yesterday|today|tomorrow|'
+ r'(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment),
+ (r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?'
+ r'(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment),  # ISO-8601 dates wrapped in ':'
+ (r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|'
+ r'day|week|month|year)[s]?'
+ r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|'
+ r'day|week|month|year)[s]?)'
+ r'|[ ]+(ago|from[ ]+now))*:', String.Moment),  # spelled-out durations, e.g. :2 hours and 10 minutes ago:
+ (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+ r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+ (r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'),
+ (r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration,
+ 'slashstartsregex'),
+ (r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|'
+ r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b',
+ Keyword.Reserved),  # Juttle processor names
+ (r'(true|false|null|Infinity)\b', Keyword.Constant),
+ (r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b',
+ Name.Builtin),
+ (JS_IDENT, Name.Other),  # identifier pattern shared with the JavaScript lexer
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ ]
+
+ }
+
+
+class NodeConsoleLexer(Lexer):
+ """
+ For parsing within an interactive Node.js REPL, such as:
+
+ .. sourcecode:: nodejsrepl
+
+ > let a = 3
+ undefined
+ > a
+ 3
+ > let b = '4'
+ undefined
+ > b
+ '4'
+ > b == a
+ false
+
+ .. versionadded: 2.10
+ """
+ name = 'Node.js REPL console session'
+ aliases = ['nodejsrepl', ]
+ mimetypes = ['text/x-nodejsrepl', ]
+
+ def get_tokens_unprocessed(self, text):  # splits prompt markers from code, delegates code to JavascriptLexer
+ jslexer = JavascriptLexer(**self.options)
+
+ curcode = ''  # JS source accumulated across prompt lines, lexed in one pass
+ insertions = []  # (position, tokens) pairs for do_insertions() to splice prompts back in
+
+ for match in line_re.finditer(text):
+ line = match.group()
+ if line.startswith('> '):
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, line[:1]),
+ (1, Whitespace, line[1:2])]))  # '>' is the prompt, the following space is whitespace
+
+ curcode += line[2:]
+ elif line.startswith('...'):
+ # node does a nested ... thing depending on depth
+ code = line.lstrip('.')
+ lead = len(line) - len(code)  # number of leading dots = continuation prompt width
+
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, line[:lead])]))
+
+ curcode += code
+ else:
+ if curcode:  # an output line ends the pending code block: flush it first
+ yield from do_insertions(insertions,
+ jslexer.get_tokens_unprocessed(curcode))
+
+ curcode = ''
+ insertions = []
+
+ yield from do_insertions([],
+ jslexer.get_tokens_unprocessed(line))  # output is lexed as JS too (values echo as JS literals)
+
+ if curcode:  # flush trailing code with no output after it
+ yield from do_insertions(insertions,
+ jslexer.get_tokens_unprocessed(curcode))
diff --git a/pygments/lexers/jmespath.py b/pygments/lexers/jmespath.py
new file mode 100644
index 0000000..6250e63
--- /dev/null
+++ b/pygments/lexers/jmespath.py
@@ -0,0 +1,68 @@
+"""
+ pygments.lexers.jmespath
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the JMESPath language
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import String, Punctuation, Whitespace, Name, Operator, \
+ Number, Literal, Keyword
+
+__all__ = ['JMESPathLexer']
+
+
+class JMESPathLexer(RegexLexer):
+ """
+ For JMESPath queries.
+ """
+ name = 'JMESPath'
+ url = 'https://jmespath.org'
+ filenames = ['*.jp']
+ aliases = ['jmespath', 'jp']
+
+ tokens = {
+ 'string': [
+ (r"'(\\(.|\n)|[^'\\])*'", String),  # raw string literal; backslash escapes anything, incl. newline
+ ],
+ 'punctuation': [
+ (r'(\[\?|[\.\*\[\],:\(\)\{\}\|])', Punctuation),  # '[?' is the filter-expression opener
+ ],
+ 'ws': [
+ (r" |\t|\n|\r", Whitespace)
+ ],
+ "dq-identifier": [
+ (r'[^\\"]+', Name.Variable),
+ (r'\\"', Name.Variable),  # escaped quote stays part of the identifier
+ (r'.', Punctuation, '#pop'),  # the closing quote ends the quoted identifier
+ ],
+ 'identifier': [
+ (r'(&)?(")', bygroups(Name.Variable, Punctuation), 'dq-identifier'),  # '&' marks an expression reference
+ (r'(")?(&?[A-Za-z][A-Za-z0-9_-]*)(")?', bygroups(Punctuation, Name.Variable, Punctuation)),
+ ],
+ 'root': [
+ include('ws'),
+ include('string'),
+ (r'(==|!=|<=|>=|<|>|&&|\|\||!)', Operator),
+ include('punctuation'),
+ (r'@', Name.Variable.Global),  # '@' is the current node
+ (r'(&?[A-Za-z][A-Za-z0-9_]*)(\()', bygroups(Name.Function, Punctuation)),  # function call
+ (r'(&)(\()', bygroups(Name.Variable, Punctuation)),
+ include('identifier'),
+ (r'-?\d+', Number),
+ (r'`', Literal, 'literal'),  # backticks wrap embedded JSON literals
+ ],
+ 'literal': [
+ include('ws'),
+ include('string'),
+ include('punctuation'),
+ (r'(false|true|null)\b', Keyword.Constant),
+ include('identifier'),
+ (r'-?\d+\.?\d*([eE][-+]\d+)?', Number),
+ (r'\\`', Literal),  # escaped backtick inside the literal
+ (r'`', Literal, '#pop'),
+ ]
+ }
diff --git a/pygments/lexers/jslt.py b/pygments/lexers/jslt.py
new file mode 100644
index 0000000..b6554e5
--- /dev/null
+++ b/pygments/lexers/jslt.py
@@ -0,0 +1,95 @@
+"""
+ pygments.lexers.jslt
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the JSLT language
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, combined, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+
+
+__all__ = ['JSLTLexer']
+
+
+_WORD_END = r'(?=[^0-9A-Z_a-z-])'
+
+
+class JSLTLexer(RegexLexer):
+    """
+    For JSLT source.
+
+    .. versionadded:: 2.10
+    """
+    name = 'JSLT'
+    url = 'https://github.com/schibsted/jslt'
+    filenames = ['*.jslt']
+    aliases = ['jslt']
+    mimetypes = ['text/x-jslt']
+
+    tokens = {
+        'root': [
+            (r'[\t\n\f\r ]+', Whitespace),
+            (r'//.*(\n|\Z)', Comment.Single),
+            (r'-?(0|[1-9][0-9]*)', Number.Integer),
+            (r'-?(0|[1-9][0-9]*)(.[0-9]+a)?([Ee][+-]?[0-9]+)', Number.Float),  # NOTE(review): '.' is unescaped and the trailing 'a' looks like a typo; as written ordinary floats like 1.5 won't match — confirm intended r'-?(0|[1-9][0-9]*)(\.[0-9]+)?([Ee][+-]?[0-9]+)?'
+            (r'"([^"\\]|\\.)*"', String.Double),
+            (r'[(),:\[\]{}]', Punctuation),
+            (r'(!=|[<=>]=?)', Operator),
+            (r'[*+/|-]', Operator),
+            (r'\.', Operator),
+            (words(('import',), suffix=_WORD_END), Keyword.Namespace, combined('import-path', 'whitespace')),  # combined() lets comments/whitespace precede the path
+            (words(('as',), suffix=_WORD_END), Keyword.Namespace, combined('import-alias', 'whitespace')),
+            (words(('let',), suffix=_WORD_END), Keyword.Declaration, combined('constant', 'whitespace')),
+            (words(('def',), suffix=_WORD_END), Keyword.Declaration, combined('function', 'whitespace')),
+            (words(('false', 'null', 'true'), suffix=_WORD_END), Keyword.Constant),
+            (words(('else', 'for', 'if'), suffix=_WORD_END), Keyword),
+            (words(('and', 'or'), suffix=_WORD_END), Operator.Word),
+            (words((
+                'all', 'any', 'array', 'boolean', 'capture', 'ceiling',
+                'contains', 'ends-with', 'error', 'flatten', 'floor',
+                'format-time', 'from-json', 'get-key', 'hash-int', 'index-of',
+                'is-array', 'is-boolean', 'is-decimal', 'is-integer',
+                'is-number', 'is-object', 'is-string', 'join', 'lowercase',
+                'max', 'min', 'mod', 'not', 'now', 'number', 'parse-time',
+                'parse-url', 'random', 'replace', 'round', 'sha256-hex', 'size',
+                'split', 'starts-with', 'string', 'sum', 'test', 'to-json',
+                'trim', 'uppercase', 'zip', 'zip-with-index', 'fallback'), suffix=_WORD_END),
+             Name.Builtin),
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*:[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function),  # namespaced function reference
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name),
+            (r'\$[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable),
+        ],
+        'constant': [
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable, 'root'),  # NOTE(review): pushes a fresh 'root' rather than '#pop', so the state stack grows per declaration — confirm intended
+        ],
+        'function': [
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function, combined('function-parameter-list', 'whitespace')),
+        ],
+        'function-parameter-list': [
+            (r'\(', Punctuation, combined('function-parameters', 'whitespace')),
+        ],
+        'function-parameters': [
+            (r',', Punctuation),
+            (r'\)', Punctuation, 'root'),
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable),
+        ],
+        'import-path': [
+            (r'"([^"]|\\.)*"', String.Symbol, 'root'),
+        ],
+        'import-alias': [
+            (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Namespace, 'root'),
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'\\.', String.Escape),
+        ],
+        'whitespace': [
+            (r'[\t\n\f\r ]+', Whitespace),
+            (r'//.*(\n|\Z)', Comment.Single),
+        ]
+    }
diff --git a/pygments/lexers/jsonnet.py b/pygments/lexers/jsonnet.py
new file mode 100644
index 0000000..305058c
--- /dev/null
+++ b/pygments/lexers/jsonnet.py
@@ -0,0 +1,168 @@
+"""
+ pygments.lexers.jsonnet
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Jsonnet data templating language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import include, RegexLexer, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ['JsonnetLexer']
+
+jsonnet_token = r'[^\W\d]\w*'
+jsonnet_function_token = jsonnet_token + r'(?=\()'
+
+
+def string_rules(quote_mark):  # build the rule list for a string state delimited by quote_mark
+    return [
+        (r"[^{}\\]".format(quote_mark), String),  # any run stopping at the quote or a backslash
+        (r"\\.", String.Escape),
+        (quote_mark, String, '#pop'),
+    ]
+
+
+def quoted_field_name(quote_mark):  # rule list for a quoted object-field name; hands off to 'field_separator'
+    return [
+        (r'([^{quote}\\]|\\.)*{quote}'.format(quote=quote_mark),
+         Name.Variable, 'field_separator')
+    ]
+
+
+class JsonnetLexer(RegexLexer):
+    """Lexer for Jsonnet source code."""
+
+    name = 'Jsonnet'
+    aliases = ['jsonnet']
+    filenames = ['*.jsonnet', '*.libsonnet']
+    url = "https://jsonnet.org"
+    tokens = {
+        # Not used by itself
+        '_comments': [
+            (r'(//|#).*\n', Comment.Single),
+            (r'/\*\*([^/]|/(?!\*))*\*/', String.Doc),  # /** ... */ doc comment, tried before plain /* ... */
+            (r'/\*([^/]|/(?!\*))*\*/', Comment),
+        ],
+        'root': [
+            include('_comments'),
+            (r"@'.*'", String),  # verbatim (no-escape) strings
+            (r'@".*"', String),
+            (r"'", String, 'singlestring'),
+            (r'"', String, 'doublestring'),
+            (r'\|\|\|(.|\n)*\|\|\|', String),  # ||| text block |||
+            # Jsonnet has no integers, only an IEEE754 64-bit float
+            (r'[+-]?[0-9]+(.[0-9])?', Number.Float),  # NOTE(review): '.' is unescaped and the fraction is a single digit; '1.25' is not fully matched — confirm intended r'[+-]?[0-9]+(\.[0-9]+)?'
+            # Omit : despite spec because it appears to be used as a field
+            # separator
+            (r'[!$~+\-&|^=<>*/%]', Operator),
+            (r'\{', Punctuation, 'object'),
+            (r'\[', Punctuation, 'array'),
+            (r'local\b', Keyword, ('local_name')),  # note: ('local_name') is just the string, not a tuple
+            (r'assert\b', Keyword, 'assert'),
+            (words([
+                'assert', 'else', 'error', 'false', 'for', 'if', 'import',
+                'importstr', 'in', 'null', 'tailstrict', 'then', 'self',
+                'super', 'true',
+            ], suffix=r'\b'), Keyword),
+            (r'\s+', Whitespace),
+            (r'function(?=\()', Keyword, 'function_params'),
+            (r'std\.' + jsonnet_function_token, Name.Builtin, 'function_args'),  # std-library calls
+            (jsonnet_function_token, Name.Function, 'function_args'),
+            (jsonnet_token, Name.Variable),
+            (r'[\.()]', Punctuation),
+        ],
+        'singlestring': string_rules("'"),
+        'doublestring': string_rules('"'),
+        'array': [
+            (r',', Punctuation),
+            (r'\]', Punctuation, '#pop'),
+            include('root'),
+        ],
+        'local_name': [
+            (jsonnet_function_token, Name.Function, 'function_params'),  # local f(x) = ...
+            (jsonnet_token, Name.Variable),
+            (r'\s+', Whitespace),
+            ('(?==)', Whitespace, ('#pop', 'local_value')),  # lookahead at '=' switches to the value state
+        ],
+        'local_value': [
+            (r'=', Operator),
+            (r';', Punctuation, '#pop'),
+            include('root'),
+        ],
+        'assert': [
+            (r':', Punctuation),
+            (r';', Punctuation, '#pop'),
+            include('root'),
+        ],
+        'function_params': [
+            (jsonnet_token, Name.Variable),
+            (r'\(', Punctuation),
+            (r'\)', Punctuation, '#pop'),
+            (r',', Punctuation),
+            (r'\s+', Whitespace),
+            (r'=', Operator, 'function_param_default'),
+        ],
+        'function_args': [
+            (r'\(', Punctuation),
+            (r'\)', Punctuation, '#pop'),
+            (r',', Punctuation),
+            (r'\s+', Whitespace),
+            include('root'),
+        ],
+        'object': [
+            (r'\s+', Whitespace),
+            (r'local\b', Keyword, 'object_local_name'),
+            (r'assert\b', Keyword, 'object_assert'),
+            (r'\[', Operator, 'field_name_expr'),  # computed field name: ["expr"]: value
+            (fr'(?={jsonnet_token})', Text, 'field_name'),
+            (r'\}', Punctuation, '#pop'),
+            (r'"', Name.Variable, 'double_field_name'),
+            (r"'", Name.Variable, 'single_field_name'),
+            include('_comments'),
+        ],
+        'field_name': [
+            (jsonnet_function_token, Name.Function,
+             ('field_separator', 'function_params')  # method field: name(params): body
+             ),
+            (jsonnet_token, Name.Variable, 'field_separator'),
+        ],
+        'double_field_name': quoted_field_name('"'),
+        'single_field_name': quoted_field_name("'"),
+        'field_name_expr': [
+            (r'\]', Operator, 'field_separator'),
+            include('root'),
+        ],
+        'function_param_default': [
+            (r'(?=[,\)])', Whitespace, '#pop'),  # default expression ends at ',' or ')' (lookahead only)
+            include('root'),
+        ],
+        'field_separator': [
+            (r'\s+', Whitespace),
+            (r'\+?::?:?', Punctuation, ('#pop', '#pop', 'field_value')),  # ':'/'::'/':::' (opt. '+'): pop separator and name states, enter the value
+            include('_comments'),
+        ],
+        'field_value': [
+            (r',', Punctuation, '#pop'),
+            (r'\}', Punctuation, '#pop:2'),  # closing brace also leaves the enclosing 'object' state
+            include('root'),
+        ],
+        'object_assert': [
+            (r':', Punctuation),
+            (r',', Punctuation, '#pop'),
+            include('root'),
+        ],
+        'object_local_name': [
+            (jsonnet_token, Name.Variable, ('#pop', 'object_local_value')),
+            (r'\s+', Whitespace),
+        ],
+        'object_local_value': [
+            (r'=', Operator),
+            (r',', Punctuation, '#pop'),
+            (r'\}', Punctuation, '#pop:2'),
+            include('root'),
+        ],
+    }
diff --git a/pygments/lexers/julia.py b/pygments/lexers/julia.py
new file mode 100644
index 0000000..86e760c
--- /dev/null
+++ b/pygments/lexers/julia.py
@@ -0,0 +1,294 @@
+"""
+ pygments.lexers.julia
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Julia language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
+ words, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+from pygments.util import shebang_matches
+from pygments.lexers._julia_builtins import OPERATORS_LIST, DOTTED_OPERATORS_LIST, \
+ KEYWORD_LIST, BUILTIN_LIST, LITERAL_LIST
+
+__all__ = ['JuliaLexer', 'JuliaConsoleLexer']
+
+# see https://docs.julialang.org/en/v1/manual/variables/#Allowed-Variable-Names
+allowed_variable = \
+ '(?:[a-zA-Z_\u00A1-\U0010ffff][a-zA-Z_0-9!\u00A1-\U0010ffff]*)'
+# see https://github.com/JuliaLang/julia/blob/master/src/flisp/julia_opsuffs.h
+operator_suffixes = r'[²³¹ʰʲʳʷʸˡˢˣᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁᵂᵃᵇᵈᵉᵍᵏᵐᵒᵖᵗᵘᵛᵝᵞᵟᵠᵡᵢᵣᵤᵥᵦᵧᵨᵩᵪᶜᶠᶥᶦᶫᶰᶸᶻᶿ′″‴‵‶‷⁗⁰ⁱ⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎ₐₑₒₓₕₖₗₘₙₚₛₜⱼⱽ]*'
+
+class JuliaLexer(RegexLexer):
+ """
+ For Julia source code.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'Julia'
+ url = 'https://julialang.org/'
+ aliases = ['julia', 'jl']
+ filenames = ['*.jl']
+ mimetypes = ['text/x-julia', 'application/x-julia']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ (r'[^\S\n]+', Whitespace),
+ (r'#=', Comment.Multiline, "blockcomment"),
+ (r'#.*$', Comment),
+ (r'[\[\](),;]', Punctuation),
+
+ # symbols
+ # intercept range expressions first
+ (r'(' + allowed_variable + r')(\s*)(:)(' + allowed_variable + ')',
+ bygroups(Name, Whitespace, Operator, Name)),
+ # then match :name which does not follow closing brackets, digits, or the
+ # ::, <:, and :> operators
+ (r'(?<![\]):<>\d.])(:' + allowed_variable + ')', String.Symbol),
+
+ # type assertions - excludes expressions like ::typeof(sin) and ::avec[1]
+ (r'(?<=::)(\s*)(' + allowed_variable + r')\b(?![(\[])',
+ bygroups(Whitespace, Keyword.Type)),
+ # type comparisons
+ # - MyType <: A or MyType >: A
+ ('(' + allowed_variable + r')(\s*)([<>]:)(\s*)(' + allowed_variable + r')\b(?![(\[])',
+ bygroups(Keyword.Type, Whitespace, Operator, Whitespace, Keyword.Type)),
+ # - <: B or >: B
+ (r'([<>]:)(\s*)(' + allowed_variable + r')\b(?![(\[])',
+ bygroups(Operator, Whitespace, Keyword.Type)),
+ # - A <: or A >:
+ (r'\b(' + allowed_variable + r')(\s*)([<>]:)',
+ bygroups(Keyword.Type, Whitespace, Operator)),
+
+ # operators
+ # Suffixes aren't actually allowed on all operators, but we'll ignore that
+ # since those cases are invalid Julia code.
+ (words([*OPERATORS_LIST, *DOTTED_OPERATORS_LIST],
+ suffix=operator_suffixes), Operator),
+ (words(['.' + o for o in DOTTED_OPERATORS_LIST],
+ suffix=operator_suffixes), Operator),
+ (words(['...', '..']), Operator),
+
+ # NOTE
+ # Patterns below work only for definition sites and thus hardly reliable.
+ #
+ # functions
+ # (r'(function)(\s+)(' + allowed_variable + ')',
+ # bygroups(Keyword, Text, Name.Function)),
+
+ # chars
+ (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|"
+ r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char),
+
+ # try to match trailing transpose
+ (r'(?<=[.\w)\]])(\'' + operator_suffixes + ')+', Operator),
+
+ # raw strings
+ (r'(raw)(""")', bygroups(String.Affix, String), 'tqrawstring'),
+ (r'(raw)(")', bygroups(String.Affix, String), 'rawstring'),
+ # regular expressions
+ (r'(r)(""")', bygroups(String.Affix, String.Regex), 'tqregex'),
+ (r'(r)(")', bygroups(String.Affix, String.Regex), 'regex'),
+ # other strings
+ (r'(' + allowed_variable + ')?(""")',
+ bygroups(String.Affix, String), 'tqstring'),
+ (r'(' + allowed_variable + ')?(")',
+ bygroups(String.Affix, String), 'string'),
+
+ # backticks
+ (r'(' + allowed_variable + ')?(```)',
+ bygroups(String.Affix, String.Backtick), 'tqcommand'),
+ (r'(' + allowed_variable + ')?(`)',
+ bygroups(String.Affix, String.Backtick), 'command'),
+
+ # type names
+ # - names that begin a curly expression
+ ('(' + allowed_variable + r')(\{)',
+ bygroups(Keyword.Type, Punctuation), 'curly'),
+ # - names as part of bare 'where'
+ (r'(where)(\s+)(' + allowed_variable + ')',
+ bygroups(Keyword, Whitespace, Keyword.Type)),
+ # - curly expressions in general
+ (r'(\{)', Punctuation, 'curly'),
+ # - names as part of type declaration
+ (r'(abstract|primitive)([ \t]+)(type\b)([\s()]+)(' +
+ allowed_variable + r')',
+ bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
+ (r'(mutable(?=[ \t]))?([ \t]+)?(struct\b)([\s()]+)(' +
+ allowed_variable + r')',
+ bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
+
+ # macros
+ (r'@' + allowed_variable, Name.Decorator),
+ (words([*OPERATORS_LIST, '..', '.', *DOTTED_OPERATORS_LIST],
+ prefix='@', suffix=operator_suffixes), Name.Decorator),
+
+ # keywords
+ (words(KEYWORD_LIST, suffix=r'\b'), Keyword),
+ # builtin types
+ (words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type),
+ # builtin literals
+ (words(LITERAL_LIST, suffix=r'\b'), Name.Builtin),
+
+ # names
+ (allowed_variable, Name),
+
+ # numbers
+ (r'(\d+((_\d+)+)?\.(?!\.)(\d+((_\d+)+)?)?|\.\d+((_\d+)+)?)([eEf][+-]?[0-9]+)?', Number.Float),
+ (r'\d+((_\d+)+)?[eEf][+-]?[0-9]+', Number.Float),
+ (r'0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?(\.([a-fA-F0-9]+((_[a-fA-F0-9]+)+)?)?)?p[+-]?\d+', Number.Float),
+ (r'0b[01]+((_[01]+)+)?', Number.Bin),
+ (r'0o[0-7]+((_[0-7]+)+)?', Number.Oct),
+ (r'0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?', Number.Hex),
+ (r'\d+((_\d+)+)?', Number.Integer),
+
+ # single dot operator matched last to permit e.g. ".1" as a float
+ (words(['.']), Operator),
+ ],
+
+ "blockcomment": [
+ (r'[^=#]', Comment.Multiline),
+ (r'#=', Comment.Multiline, '#push'),
+ (r'=#', Comment.Multiline, '#pop'),
+ (r'[=#]', Comment.Multiline),
+ ],
+
+ 'curly': [
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ (allowed_variable, Keyword.Type),
+ include('root'),
+ ],
+
+ 'tqrawstring': [
+ (r'"""', String, '#pop'),
+ (r'([^"]|"[^"][^"])+', String),
+ ],
+ 'rawstring': [
+ (r'"', String, '#pop'),
+ (r'\\"', String.Escape),
+ (r'([^"\\]|\\[^"])+', String),
+ ],
+
+ # Interpolation is defined as "$" followed by the shortest full
+ # expression, which is something we can't parse. Include the most
+ # common cases here: $word, and $(paren'd expr).
+ 'interp': [
+ (r'\$' + allowed_variable, String.Interpol),
+ (r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'),
+ ],
+ 'in-intp': [
+ (r'\(', Punctuation, '#push'),
+ (r'\)', Punctuation, '#pop'),
+ include('root'),
+ ],
+
+ 'string': [
+ (r'(")(' + allowed_variable + r'|\d+)?',
+ bygroups(String, String.Affix), '#pop'),
+ # FIXME: This escape pattern is not perfect.
+ (r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape),
+ include('interp'),
+ # @printf and @sprintf formats
+ (r'%[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]',
+ String.Interpol),
+ (r'[^"$%\\]+', String),
+ (r'.', String),
+ ],
+ 'tqstring': [
+ (r'(""")(' + allowed_variable + r'|\d+)?',
+ bygroups(String, String.Affix), '#pop'),
+ (r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape),
+ include('interp'),
+ (r'[^"$%\\]+', String),
+ (r'.', String),
+ ],
+
+ 'regex': [
+ (r'(")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
+ (r'\\"', String.Regex),
+ (r'[^\\"]+', String.Regex),
+ ],
+
+ 'tqregex': [
+ (r'(""")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
+ (r'[^"]+', String.Regex),
+ ],
+
+ 'command': [
+ (r'(`)(' + allowed_variable + r'|\d+)?',
+ bygroups(String.Backtick, String.Affix), '#pop'),
+ (r'\\[`$]', String.Escape),
+ include('interp'),
+ (r'[^\\`$]+', String.Backtick),
+ (r'.', String.Backtick),
+ ],
+ 'tqcommand': [
+ (r'(```)(' + allowed_variable + r'|\d+)?',
+ bygroups(String.Backtick, String.Affix), '#pop'),
+ (r'\\\$', String.Escape),
+ include('interp'),
+ (r'[^\\`$]+', String.Backtick),
+ (r'.', String.Backtick),
+ ],
+ }
+
+ def analyse_text(text):
+ return shebang_matches(text, r'julia')
+
+
+class JuliaConsoleLexer(Lexer):
+ """
+ For Julia console sessions. Modeled after MatlabSessionLexer.
+
+ .. versionadded:: 1.6
+ """
+ name = 'Julia console'
+ aliases = ['jlcon', 'julia-repl']
+
+ def get_tokens_unprocessed(self, text):
+ jllexer = JuliaLexer(**self.options)
+ start = 0
+ curcode = ''
+ insertions = []
+ output = False
+ error = False
+
+ for line in text.splitlines(keepends=True):
+ if line.startswith('julia>'):
+ insertions.append((len(curcode), [(0, Generic.Prompt, line[:6])]))
+ curcode += line[6:]
+ output = False
+ error = False
+ elif line.startswith('help?>') or line.startswith('shell>'):
+ yield start, Generic.Prompt, line[:6]
+ yield start + 6, Text, line[6:]
+ output = False
+ error = False
+ elif line.startswith(' ') and not output:
+ insertions.append((len(curcode), [(0, Whitespace, line[:6])]))
+ curcode += line[6:]
+ else:
+ if curcode:
+ yield from do_insertions(
+ insertions, jllexer.get_tokens_unprocessed(curcode))
+ curcode = ''
+ insertions = []
+ if line.startswith('ERROR: ') or error:
+ yield start, Generic.Error, line
+ error = True
+ else:
+ yield start, Generic.Output, line
+ output = True
+ start += len(line)
+
+ if curcode:
+ yield from do_insertions(
+ insertions, jllexer.get_tokens_unprocessed(curcode))
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
new file mode 100644
index 0000000..ddfc25d
--- /dev/null
+++ b/pygments/lexers/jvm.py
@@ -0,0 +1,1820 @@
+"""
+ pygments.lexers.jvm
+ ~~~~~~~~~~~~~~~~~~~
+
+ Pygments lexers for JVM languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
+ this, combined, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+from pygments.util import shebang_matches
+from pygments import unistring as uni
+
+__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
+ 'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
+ 'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
+ 'PigLexer', 'GoloLexer', 'JasminLexer', 'SarlLexer']
+
+
+class JavaLexer(RegexLexer):
+ """
+ For Java source code.
+ """
+
+ name = 'Java'
+ url = 'https://www.oracle.com/technetwork/java/'
+ aliases = ['java']
+ filenames = ['*.java']
+ mimetypes = ['text/x-java']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ (r'(^\s*)((?:(?:public|private|protected|static|strictfp)(?:\s+))*)(record)\b',
+ bygroups(Whitespace, using(this), Keyword.Declaration), 'class'),
+ (r'[^\S\n]+', Whitespace),
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/\*.*?\*/', Comment.Multiline),
+ # keywords: go before method names to avoid lexing "throw new XYZ"
+ # as a method signature
+ (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
+ r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
+ Keyword),
+ # method names
+ (r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
+ r'((?:[^\W\d]|\$)[\w$]*)' # method name
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Whitespace, Punctuation)),
+ (r'@[^\W\d][\w.]*', Name.Decorator),
+ (r'(abstract|const|enum|extends|final|implements|native|private|'
+ r'protected|public|sealed|static|strictfp|super|synchronized|throws|'
+ r'transient|volatile|yield)\b', Keyword.Declaration),
+ (r'(boolean|byte|char|double|float|int|long|short|void)\b',
+ Keyword.Type),
+ (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(class|interface)\b', Keyword.Declaration, 'class'),
+ (r'(var)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'var'),
+ (r'(import(?:\s+static)?)(\s+)', bygroups(Keyword.Namespace, Whitespace),
+ 'import'),
+ (r'"""\n', String, 'multiline_string'),
+ (r'"', String, 'string'),
+ (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
+ (r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation,
+ Name.Attribute)),
+ (r'^(\s*)(default)(:)', bygroups(Whitespace, Keyword, Punctuation)),
+ (r'^(\s*)((?:[^\W\d]|\$)[\w$]*)(:)', bygroups(Whitespace, Name.Label,
+ Punctuation)),
+ (r'([^\W\d]|\$)[\w$]*', Name),
+ (r'([0-9][0-9_]*\.([0-9][0-9_]*)?|'
+ r'\.[0-9][0-9_]*)'
+ r'([eE][+\-]?[0-9][0-9_]*)?[fFdD]?|'
+ r'[0-9][eE][+\-]?[0-9][0-9_]*[fFdD]?|'
+ r'[0-9]([eE][+\-]?[0-9][0-9_]*)?[fFdD]|'
+ r'0[xX]([0-9a-fA-F][0-9a-fA-F_]*\.?|'
+ r'([0-9a-fA-F][0-9a-fA-F_]*)?\.[0-9a-fA-F][0-9a-fA-F_]*)'
+ r'[pP][+\-]?[0-9][0-9_]*[fFdD]?', Number.Float),
+ (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*[lL]?', Number.Hex),
+ (r'0[bB][01][01_]*[lL]?', Number.Bin),
+ (r'0[0-7_]+[lL]?', Number.Oct),
+ (r'0|[1-9][0-9_]*[lL]?', Number.Integer),
+ (r'[~^*!%&\[\]<>|+=/?-]', Operator),
+ (r'[{}();:.,]', Punctuation),
+ (r'\n', Whitespace)
+ ],
+ 'class': [
+ (r'\s+', Text),
+ (r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop')
+ ],
+ 'var': [
+ (r'([^\W\d]|\$)[\w$]*', Name, '#pop')
+ ],
+ 'import': [
+ (r'[\w.]+\*?', Name.Namespace, '#pop')
+ ],
+ 'multiline_string': [
+ (r'"""', String, '#pop'),
+ (r'"', String),
+ include('string')
+ ],
+ 'string': [
+ (r'[^\\"]+', String),
+ (r'\\\\', String), # Escaped backslash
+ (r'\\"', String), # Escaped quote
+ (r'\\', String), # Bare backslash
+ (r'"', String, '#pop'), # Closing quote
+ ],
+ }
+
+
+class AspectJLexer(JavaLexer):
+ """
+ For AspectJ source code.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'AspectJ'
+ url = 'http://www.eclipse.org/aspectj/'
+ aliases = ['aspectj']
+ filenames = ['*.aj']
+ mimetypes = ['text/x-aspectj']
+
+ aj_keywords = {
+ 'aspect', 'pointcut', 'privileged', 'call', 'execution',
+ 'initialization', 'preinitialization', 'handler', 'get', 'set',
+ 'staticinitialization', 'target', 'args', 'within', 'withincode',
+ 'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
+ 'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
+ 'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
+ 'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
+ 'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
+ 'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
+ }
+ aj_inter_type = {'parents:', 'warning:', 'error:', 'soft:', 'precedence:'}
+ aj_inter_type_annotation = {'@type', '@method', '@constructor', '@field'}
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.aj_keywords:
+ yield index, Keyword, value
+ elif token is Name.Label and value in self.aj_inter_type:
+ yield index, Keyword, value[:-1]
+ yield index, Operator, value[-1]
+ elif token is Name.Decorator and value in self.aj_inter_type_annotation:
+ yield index, Keyword, value
+ else:
+ yield index, token, value
+
+
+class ScalaLexer(RegexLexer):
+ """
+ For Scala source code.
+ """
+
+ name = 'Scala'
+ url = 'http://www.scala-lang.org'
+ aliases = ['scala']
+ filenames = ['*.scala']
+ mimetypes = ['text/x-scala']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ opchar = '[!#%&*\\-\\/:?@^' + uni.combine('Sm', 'So') + ']'
+ letter = '[_\\$' + uni.combine('Ll', 'Lu', 'Lo', 'Nl', 'Lt') + ']'
+ upperLetter = '[' + uni.combine('Lu', 'Lt') + ']'
+ letterOrDigit = '(?:%s|[0-9])' % letter
+ letterOrDigitNoDollarSign = '(?:%s|[0-9])' % letter.replace('\\$', '')
+ alphaId = '%s+' % letter
+ simpleInterpolatedVariable = '%s%s*' % (letter, letterOrDigitNoDollarSign)
+ idrest = '%s%s*(?:(?<=_)%s+)?' % (letter, letterOrDigit, opchar)
+ idUpper = '%s%s*(?:(?<=_)%s+)?' % (upperLetter, letterOrDigit, opchar)
+ plainid = '(?:%s|%s+)' % (idrest, opchar)
+ backQuotedId = r'`[^`]+`'
+ anyId = r'(?:%s|%s)' % (plainid, backQuotedId)
+ notStartOfComment = r'(?!//|/\*)'
+ endOfLineMaybeWithComment = r'(?=\s*(//|$))'
+
+ keywords = (
+ 'new', 'return', 'throw', 'classOf', 'isInstanceOf', 'asInstanceOf',
+ 'else', 'if', 'then', 'do', 'while', 'for', 'yield', 'match', 'case',
+ 'catch', 'finally', 'try'
+ )
+
+ operators = (
+ '<%', '=:=', '<:<', '<%<', '>:', '<:', '=', '==', '!=', '<=', '>=',
+ '<>', '<', '>', '<-', '←', '->', '→', '=>', '⇒', '?', '@', '|', '-',
+ '+', '*', '%', '~', '\\'
+ )
+
+ storage_modifiers = (
+ 'private', 'protected', 'synchronized', '@volatile', 'abstract',
+ 'final', 'lazy', 'sealed', 'implicit', 'override', '@transient',
+ '@native'
+ )
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+ include('script-header'),
+ include('imports'),
+ include('exports'),
+ include('storage-modifiers'),
+ include('annotations'),
+ include('using'),
+ include('declarations'),
+ include('inheritance'),
+ include('extension'),
+ include('end'),
+ include('constants'),
+ include('strings'),
+ include('symbols'),
+ include('singleton-type'),
+ include('inline'),
+ include('quoted'),
+ include('keywords'),
+ include('operators'),
+ include('punctuation'),
+ include('names'),
+ ],
+
+ # Includes:
+ 'whitespace': [
+ (r'\s+', Whitespace),
+ ],
+ 'comments': [
+ (r'//.*?\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+ ],
+ 'script-header': [
+ (r'^#!([^\n]*)$', Comment.Hashbang),
+ ],
+ 'imports': [
+ (r'\b(import)(\s+)', bygroups(Keyword, Whitespace), 'import-path'),
+ ],
+ 'exports': [
+ (r'\b(export)(\s+)(given)(\s+)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace), 'export-path'),
+ (r'\b(export)(\s+)', bygroups(Keyword, Whitespace), 'export-path'),
+ ],
+ 'storage-modifiers': [
+ (words(storage_modifiers, prefix=r'\b', suffix=r'\b'), Keyword),
+ # Only highlight soft modifiers if they are eventually followed by
+ # the correct keyword. Note that soft modifiers can be followed by a
+ # sequence of regular modifiers; [a-z\s]* skips those, and we just
+ # check that the soft modifier is applied to a supported statement.
+ (r'\b(transparent|opaque|infix|open|inline)\b(?=[a-z\s]*\b'
+ r'(def|val|var|given|type|class|trait|object|enum)\b)', Keyword),
+ ],
+ 'annotations': [
+ (r'@%s' % idrest, Name.Decorator),
+ ],
+ 'using': [
+ # using is a soft keyword, can only be used in the first position of
+ # a parameter or argument list.
+ (r'(\()(\s*)(using)(\s)', bygroups(Punctuation, Whitespace, Keyword, Whitespace)),
+ ],
+ 'declarations': [
+ (r'\b(def)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
+ bygroups(Keyword, Whitespace, Name.Function)),
+ (r'\b(trait)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
+ bygroups(Keyword, Whitespace, Name.Class)),
+ (r'\b(?:(case)(\s+))?(class|object|enum)\b(\s*)%s(%s)?' %
+ (notStartOfComment, anyId),
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class)),
+ (r'(?<!\.)\b(type)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
+ bygroups(Keyword, Whitespace, Name.Class)),
+ (r'\b(val|var)\b', Keyword.Declaration),
+ (r'\b(package)(\s+)(object)\b(\s*)%s(%s)?' %
+ (notStartOfComment, anyId),
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)),
+ (r'\b(package)(\s+)', bygroups(Keyword, Whitespace), 'package'),
+ (r'\b(given)\b(\s*)(%s)' % idUpper,
+ bygroups(Keyword, Whitespace, Name.Class)),
+ (r'\b(given)\b(\s*)(%s)?' % anyId,
+ bygroups(Keyword, Whitespace, Name)),
+ ],
+ 'inheritance': [
+ (r'\b(extends|with|derives)\b(\s*)'
+ r'(%s|%s|(?=\([^\)]+=>)|(?=%s)|(?="))?' %
+ (idUpper, backQuotedId, plainid),
+ bygroups(Keyword, Whitespace, Name.Class)),
+ ],
+ 'extension': [
+ (r'\b(extension)(\s+)(?=[\[\(])', bygroups(Keyword, Whitespace)),
+ ],
+ 'end': [
+ # end is a soft keyword, should only be highlighted in certain cases
+ (r'\b(end)(\s+)(if|while|for|match|new|extension|val|var)\b',
+ bygroups(Keyword, Whitespace, Keyword)),
+ (r'\b(end)(\s+)(%s)%s' % (idUpper, endOfLineMaybeWithComment),
+ bygroups(Keyword, Whitespace, Name.Class)),
+ (r'\b(end)(\s+)(%s|%s)?%s' %
+ (backQuotedId, plainid, endOfLineMaybeWithComment),
+ bygroups(Keyword, Whitespace, Name.Namespace)),
+ ],
+ 'punctuation': [
+ (r'[{}()\[\];,.]', Punctuation),
+ (r'(?<!:):(?!:)', Punctuation),
+ ],
+ 'keywords': [
+ (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
+ ],
+ 'operators': [
+ (r'(%s{2,})(\s+)' % opchar, bygroups(Operator, Whitespace)),
+ (r'/(?![/*])', Operator),
+ (words(operators), Operator),
+ (r'(?<!%s)(!|&&|\|\|)(?!%s)' % (opchar, opchar), Operator),
+ ],
+ 'constants': [
+ (r'\b(this|super)\b', Name.Builtin.Pseudo),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'0[xX][0-9a-fA-F_]*', Number.Hex),
+ (r'([0-9][0-9_]*\.[0-9][0-9_]*|\.[0-9][0-9_]*)'
+ r'([eE][+-]?[0-9][0-9_]*)?[fFdD]?', Number.Float),
+ (r'[0-9]+([eE][+-]?[0-9]+)?[fFdD]', Number.Float),
+ (r'[0-9]+([eE][+-]?[0-9]+)[fFdD]?', Number.Float),
+ (r'[0-9]+[lL]', Number.Integer.Long),
+ (r'[0-9]+', Number.Integer),
+ (r'""".*?"""(?!")', String),
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r"(')(\\.)(')", bygroups(String.Char, String.Escape, String.Char)),
+ (r"'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
+ ],
+ "strings": [
+ (r'[fs]"""', String, 'interpolated-string-triple'),
+ (r'[fs]"', String, 'interpolated-string'),
+ (r'raw"(\\\\|\\"|[^"])*"', String),
+ ],
+ 'symbols': [
+ (r"('%s)(?!')" % plainid, String.Symbol),
+ ],
+ 'singleton-type': [
+ (r'(\.)(type)\b', bygroups(Punctuation, Keyword)),
+ ],
+ 'inline': [
+ # inline is a soft modifier, only highlighted if followed by if,
+ # match or parameters.
+ (r'\b(inline)(?=\s+(%s|%s)\s*:)' % (plainid, backQuotedId),
+ Keyword),
+ (r'\b(inline)\b(?=(?:.(?!\b(?:val|def|given)\b))*\b(if|match)\b)',
+ Keyword),
+ ],
+ 'quoted': [
+ # '{...} or ${...}
+ (r"['$]\{(?!')", Punctuation),
+ # '[...]
+ (r"'\[(?!')", Punctuation),
+ ],
+ 'names': [
+ (idUpper, Name.Class),
+ (anyId, Name),
+ ],
+
+ # States
+ 'comment': [
+ (r'[^/*]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'import-path': [
+ (r'(?<=[\n;:])', Text, '#pop'),
+ include('comments'),
+ (r'\b(given)\b', Keyword),
+ include('qualified-name'),
+ (r'\{', Punctuation, 'import-path-curly-brace'),
+ ],
+ 'import-path-curly-brace': [
+ include('whitespace'),
+ include('comments'),
+ (r'\b(given)\b', Keyword),
+ (r'=>', Operator),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'[\[\]]', Punctuation),
+ include('qualified-name'),
+ ],
+ 'export-path': [
+ (r'(?<=[\n;:])', Text, '#pop'),
+ include('comments'),
+ include('qualified-name'),
+ (r'\{', Punctuation, 'export-path-curly-brace'),
+ ],
+ 'export-path-curly-brace': [
+ include('whitespace'),
+ include('comments'),
+ (r'=>', Operator),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation),
+ include('qualified-name'),
+ ],
+ 'package': [
+ (r'(?<=[\n;])', Text, '#pop'),
+ (r':', Punctuation, '#pop'),
+ include('comments'),
+ include('qualified-name'),
+ ],
+ 'interpolated-string-triple': [
+ (r'"""(?!")', String, '#pop'),
+ (r'"', String),
+ include('interpolated-string-common'),
+ ],
+ 'interpolated-string': [
+ (r'"', String, '#pop'),
+ include('interpolated-string-common'),
+ ],
+ 'interpolated-string-brace': [
+ (r'\}', String.Interpol, '#pop'),
+ (r'\{', Punctuation, 'interpolated-string-nested-brace'),
+ include('root'),
+ ],
+ 'interpolated-string-nested-brace': [
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ include('root'),
+ ],
+
+ # Helpers
+ 'qualified-name': [
+ (idUpper, Name.Class),
+ (r'(%s)(\.)' % anyId, bygroups(Name.Namespace, Punctuation)),
+ (r'\.', Punctuation),
+ (anyId, Name),
+ (r'[^\S\n]+', Whitespace),
+ ],
+ 'interpolated-string-common': [
+ (r'[^"$\\]+', String),
+ (r'\$\$', String.Escape),
+ (r'(\$)(%s)' % simpleInterpolatedVariable,
+ bygroups(String.Interpol, Name)),
+ (r'\$\{', String.Interpol, 'interpolated-string-brace'),
+ (r'\\.', String),
+ ],
+ }
+
+
+class GosuLexer(RegexLexer):
+ """
+ For Gosu source code.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'Gosu'
+ aliases = ['gosu']
+ filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
+ mimetypes = ['text/x-gosu']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ # method names
+ (r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # modifiers etc.
+ r'([a-zA-Z_]\w*)' # method name
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Whitespace, Operator)),
+ (r'[^\S\n]+', Whitespace),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'@[a-zA-Z_][\w.]*', Name.Decorator),
+ (r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
+ r'index|while|do|continue|break|return|try|catch|finally|this|'
+ r'throw|new|switch|case|default|eval|super|outer|classpath|'
+ r'using)\b', Keyword),
+ (r'(var|delegate|construct|function|private|internal|protected|'
+ r'public|abstract|override|final|static|extends|transient|'
+ r'implements|represents|readonly)\b', Keyword.Declaration),
+ (r'(property)(\s+)(get|set)?', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration)),
+ (r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
+ Keyword.Type),
+ (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
+ (r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
+ (r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_]\w*)',
+ bygroups(Keyword.Declaration, Whitespace, Name.Class)),
+ (r'(uses)(\s+)([\w.]+\*?)',
+ bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
+ (r'"', String, 'string'),
+ (r'(\??[.#])([a-zA-Z_]\w*)',
+ bygroups(Operator, Name.Attribute)),
+ (r'(:)([a-zA-Z_]\w*)',
+ bygroups(Operator, Name.Attribute)),
+ (r'[a-zA-Z_$]\w*', Name),
+ (r'and|or|not|[\\~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r'\n', Whitespace)
+ ],
+ 'templateText': [
+ (r'(\\<)|(\\\$)', String),
+ (r'(<%@\s+)(extends|params)',
+ bygroups(Operator, Name.Decorator), 'stringTemplate'),
+ (r'<%!--.*?--%>', Comment.Multiline),
+ (r'(<%)|(<%=)', Operator, 'stringTemplate'),
+ (r'\$\{', Operator, 'stringTemplateShorthand'),
+ (r'.', String)
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ include('templateText')
+ ],
+ 'stringTemplate': [
+ (r'"', String, 'string'),
+ (r'%>', Operator, '#pop'),
+ include('root')
+ ],
+ 'stringTemplateShorthand': [
+ (r'"', String, 'string'),
+ (r'\{', Operator, 'stringTemplateShorthand'),
+ (r'\}', Operator, '#pop'),
+ include('root')
+ ],
+ }
+
+
+class GosuTemplateLexer(Lexer):
+ """
+ For Gosu templates.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'Gosu Template'
+ aliases = ['gst']
+ filenames = ['*.gst']
+ mimetypes = ['text/x-gosu-template']
+
+ def get_tokens_unprocessed(self, text):
+ lexer = GosuLexer()
+ stack = ['templateText']
+ yield from lexer.get_tokens_unprocessed(text, stack)
+
+
+class GroovyLexer(RegexLexer):
+ """
+ For Groovy source code.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'Groovy'
+ url = 'https://groovy-lang.org/'
+ aliases = ['groovy']
+ filenames = ['*.groovy','*.gradle']
+ mimetypes = ['text/x-groovy']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ # Groovy allows a file to start with a shebang
+ (r'#!(.*?)$', Comment.Preproc, 'base'),
+ default('base'),
+ ],
+ 'base': [
+ (r'[^\S\n]+', Whitespace),
+ (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
+ (r'/\*.*?\*/', Comment.Multiline),
+ # keywords: go before method names to avoid lexing "throw new XYZ"
+ # as a method signature
+ (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
+ r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
+ Keyword),
+ # method names
+ (r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
+ r'('
+ r'[a-zA-Z_]\w*' # method name
+ r'|"(?:\\\\|\\[^\\]|[^"\\])*"' # or double-quoted method name
+ r"|'(?:\\\\|\\[^\\]|[^'\\])*'" # or single-quoted method name
+ r')'
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Whitespace, Operator)),
+ (r'@[a-zA-Z_][\w.]*', Name.Decorator),
+ (r'(abstract|const|enum|extends|final|implements|native|private|'
+ r'protected|public|static|strictfp|super|synchronized|throws|'
+ r'transient|volatile)\b', Keyword.Declaration),
+ (r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
+ Keyword.Type),
+ (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Whitespace),
+ 'class'),
+ (r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
+ (r'""".*?"""', String.Double),
+ (r"'''.*?'''", String.Single),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'\$/((?!/\$).)*/\$', String),
+ (r'/(\\\\|\\[^\\]|[^/\\])*/', String),
+ (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
+ (r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
+ (r'[a-zA-Z_]\w*:', Name.Label),
+ (r'[a-zA-Z_$]\w*', Name),
+ (r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+L?', Number.Integer),
+ (r'\n', Whitespace)
+ ],
+ 'class': [
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+ ],
+ 'import': [
+ (r'[\w.]+\*?', Name.Namespace, '#pop')
+ ],
+ }
+
+ def analyse_text(text):
+ return shebang_matches(text, r'groovy')
+
+
+class IokeLexer(RegexLexer):
+ """
+ For Ioke (a strongly typed, dynamic,
+ prototype based programming language) source.
+
+ .. versionadded:: 1.4
+ """
+ name = 'Ioke'
+ url = 'https://ioke.org/'
+ filenames = ['*.ik']
+ aliases = ['ioke', 'ik']
+ mimetypes = ['text/x-iokesrc']
+ tokens = {
+ 'interpolatableText': [
+ (r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
+ r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
+ (r'#\{', Punctuation, 'textInterpolationRoot')
+ ],
+
+ 'text': [
+ (r'(?<!\\)"', String, '#pop'),
+ include('interpolatableText'),
+ (r'[^"]', String)
+ ],
+
+ 'documentation': [
+ (r'(?<!\\)"', String.Doc, '#pop'),
+ include('interpolatableText'),
+ (r'[^"]', String.Doc)
+ ],
+
+ 'textInterpolationRoot': [
+ (r'\}', Punctuation, '#pop'),
+ include('root')
+ ],
+
+ 'slashRegexp': [
+ (r'(?<!\\)/[im-psux]*', String.Regex, '#pop'),
+ include('interpolatableText'),
+ (r'\\/', String.Regex),
+ (r'[^/]', String.Regex)
+ ],
+
+ 'squareRegexp': [
+ (r'(?<!\\)][im-psux]*', String.Regex, '#pop'),
+ include('interpolatableText'),
+ (r'\\]', String.Regex),
+ (r'[^\]]', String.Regex)
+ ],
+
+ 'squareText': [
+ (r'(?<!\\)]', String, '#pop'),
+ include('interpolatableText'),
+ (r'[^\]]', String)
+ ],
+
+ 'root': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+
+ # Comments
+ (r';(.*?)\n', Comment),
+ (r'\A#!(.*?)\n', Comment),
+
+ # Regexps
+ (r'#/', String.Regex, 'slashRegexp'),
+ (r'#r\[', String.Regex, 'squareRegexp'),
+
+ # Symbols
+ (r':[\w!:?]+', String.Symbol),
+ (r'[\w!:?]+:(?![\w!?])', String.Other),
+ (r':"(\\\\|\\[^\\]|[^"\\])*"', String.Symbol),
+
+ # Documentation
+ (r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
+ r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
+ r'|(?<=dsyntax\())(\s*)"', String.Doc, 'documentation'),
+
+ # Text
+ (r'"', String, 'text'),
+ (r'#\[', String, 'squareText'),
+
+ # Mimic
+ (r'\w[\w!:?]+(?=\s*=.*mimic\s)', Name.Entity),
+
+ # Assignment
+ (r'[a-zA-Z_][\w!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
+ Name.Variable),
+
+ # keywords
+ (r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
+ r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
+ r'with)(?![\w!:?])', Keyword.Reserved),
+
+ # Origin
+ (r'(eval|mimic|print|println)(?![\w!:?])', Keyword),
+
+ # Base
+ (r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
+ r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
+ r'(?![\w!:?])', Keyword),
+
+ # Ground
+ (r'(stackTraceAsText)(?![\w!:?])', Keyword),
+
+ # DefaultBehaviour Literals
+ (r'(dict|list|message|set)(?![\w!:?])', Keyword.Reserved),
+
+ # DefaultBehaviour Case
+ (r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
+ r'case:otherwise|case:xor)(?![\w!:?])', Keyword.Reserved),
+
+ # DefaultBehaviour Reflection
+ (r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
+ r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
+ r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
+ r'(?![\w!:?])', Keyword),
+
+ # DefaultBehaviour Aspects
+ (r'(after|around|before)(?![\w!:?])', Keyword.Reserved),
+
+ # DefaultBehaviour
+ (r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
+ r'(?![\w!:?])', Keyword),
+ (r'(use|destructuring)', Keyword.Reserved),
+
+ # DefaultBehavior BaseBehavior
+ (r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
+ r'documentation|identity|removeCell!|undefineCell)'
+ r'(?![\w!:?])', Keyword),
+
+ # DefaultBehavior Internal
+ (r'(internal:compositeRegexp|internal:concatenateText|'
+ r'internal:createDecimal|internal:createNumber|'
+ r'internal:createRegexp|internal:createText)'
+ r'(?![\w!:?])', Keyword.Reserved),
+
+ # DefaultBehaviour Conditions
+ (r'(availableRestarts|bind|error\!|findRestart|handle|'
+ r'invokeRestart|rescue|restart|signal\!|warn\!)'
+ r'(?![\w!:?])', Keyword.Reserved),
+
+ # constants
+ (r'(nil|false|true)(?![\w!:?])', Name.Constant),
+
+ # names
+ (r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
+ r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
+ r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
+ r'Conditions|Definitions|FlowControl|Internal|Literals|'
+ r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
+ r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
+ r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
+ r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
+ r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
+ r'System|Text|Tuple)(?![\w!:?])', Name.Builtin),
+
+ # functions
+ ('(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
+ 'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
+ '(?![\\w!:?])', Name.Function),
+
+ # Numbers
+ (r'-?0[xX][0-9a-fA-F]+', Number.Hex),
+ (r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+ (r'-?\d+', Number.Integer),
+
+ (r'#\(', Punctuation),
+
+ # Operators
+ (r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
+ r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
+ r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
+ r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
+ r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
+ r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
+ r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
+ (r'(and|nand|or|xor|nor|return|import)(?![\w!?])',
+ Operator),
+
+ # Punctuation
+ (r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|\{|\})', Punctuation),
+
+ # kinds
+ (r'[A-Z][\w!:?]*', Name.Class),
+
+ # default cellnames
+ (r'[a-z_][\w!:?]*', Name)
+ ]
+ }
+
+
class ClojureLexer(RegexLexer):
    """
    Lexer for Clojure source code.

    .. versionadded:: 0.11
    """
    name = 'Clojure'
    url = 'http://clojure.org/'
    aliases = ['clojure', 'clj']
    filenames = ['*.clj', '*.cljc']
    mimetypes = ['text/x-clojure', 'application/x-clojure']

    # Forms implemented directly by the Clojure compiler.
    special_forms = (
        '.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
    )

    # It's safe to consider 'ns' a declaration thing because it defines a new
    # namespace.
    declarations = (
        'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
        'defstruct', 'defonce', 'declare', 'definline', 'definterface',
        'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
    )

    # Core-library names highlighted as builtins.
    builtins = (
        '*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
        'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
        'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
        'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
        'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
        'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
        'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
        'butlast', 'byte', 'cast', 'char', 'children', 'class',
        'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
        'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
        'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
        'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
        'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
        'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
        'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
        'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
        'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
        'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
        'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
        'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
        'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
        'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
        'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
        'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
        'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
        'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
        'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
        'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
        'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
        'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
        'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
        'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
        'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
        're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
        'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
        'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
        'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
        'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
        'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
        'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
        'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
        'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
        'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
        'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
        'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
        'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
        'vector?', 'when', 'when-first', 'when-let', 'when-not',
        'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
        'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper')

    # valid names for identifiers
    # well, names can only not consist fully of numbers
    # but this should be good enough for now

    # TODO / should divide keywords/symbols into namespace/rest
    # but that's hard, so just pretend / is part of the name
    valid_name = r'(?!#)[\w!$%*+<=>?/.#|-]+'

    tokens = {
        'root': [
            # the comments - always starting with semicolon
            # and going to the end of the line
            (r';.*$', Comment.Single),

            # whitespaces - usually not relevant
            # (commas are treated as whitespace by the Clojure reader)
            (r',+', Text),
            (r'\s+', Whitespace),

            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+/\d+', Number),  # ratio literal, e.g. 1/3
            (r'-?\d+', Number.Integer),
            # NOTE(review): the optional sign sits after '0x' here, so
            # '0x-ff' matches but '-0xff' does not — confirm intended.
            (r'0x-?[abcdef\d]+', Number.Hex),

            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"'" + valid_name, String.Symbol),
            (r"\\(.|[a-z]+)", String.Char),

            # keywords
            (r'::?#?' + valid_name, String.Symbol),

            # special operators
            (r'~@|[`\'#^~&@]', Operator),

            # highlight the special forms
            # NOTE(review): suffix=' ' means a special form is only matched
            # when followed by a literal space; at end of line it falls
            # through to the generic name rules below — confirm intended.
            (words(special_forms, suffix=' '), Keyword),

            # Technically, only the special forms are 'keywords'. The problem
            # is that only treating them as keywords means that things like
            # 'defn' and 'ns' need to be highlighted as builtins. This is ugly
            # and weird for most styles. So, as a compromise we're going to
            # highlight them as Keyword.Declarations.
            (words(declarations, suffix=' '), Keyword.Declaration),

            # highlight the builtins
            (words(builtins, suffix=' '), Name.Builtin),

            # the remaining functions
            (r'(?<=\()' + valid_name, Name.Function),

            # find the remaining variables
            (valid_name, Name.Variable),

            # Clojure accepts vector notation
            (r'(\[|\])', Punctuation),

            # Clojure accepts map notation
            (r'(\{|\})', Punctuation),

            # the famous parentheses!
            (r'(\(|\))', Punctuation),
        ],
    }
+
+
class ClojureScriptLexer(ClojureLexer):
    """
    Lexer for ClojureScript source code.

    .. versionadded:: 2.0
    """
    # Token rules are inherited unchanged from ClojureLexer; only the
    # registration metadata (name, aliases, filenames, mimetypes) differs.
    name = 'ClojureScript'
    url = 'http://clojure.org/clojurescript'
    aliases = ['clojurescript', 'cljs']
    filenames = ['*.cljs']
    mimetypes = ['text/x-clojurescript', 'application/x-clojurescript']
+
+
class TeaLangLexer(RegexLexer):
    """
    For Tea source code. Only used within a
    TeaTemplateLexer.

    .. versionadded:: 1.5
    """
    # Deliberately defines no name/aliases/filenames: this lexer is not
    # registered standalone, it is only delegated to by TeaTemplateLexer.

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # method names
            (r'^(\s*(?:[a-zA-Z_][\w\.\[\]]*\s+)+?)' # return arguments
             r'([a-zA-Z_]\w*)'                      # method name
             r'(\s*)(\()',                          # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            (r'[^\S\n]+', Whitespace),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@[a-zA-Z_][\w\.]*', Name.Decorator),
            (r'(and|break|else|foreach|if|in|not|or|reverse)\b',
             Keyword),
            (r'(as|call|define)\b', Keyword.Declaration),
            (r'(true|false|null)\b', Keyword.Constant),
            # 'template' and 'import' push states that consume the name
            # following the keyword.
            (r'(template)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'template'),
            (r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_\$]\w*', Name),
            (r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Whitespace)
        ],
        # name of a declared template, then back to root
        'template': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        # dotted import path (optionally ending in '*'), then back to root
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
    }
+
+
class CeylonLexer(RegexLexer):
    """
    For Ceylon source code.

    .. versionadded:: 1.6
    """

    name = 'Ceylon'
    url = 'http://ceylon-lang.org/'
    aliases = ['ceylon']
    filenames = ['*.ceylon']
    mimetypes = ['text/x-ceylon']

    flags = re.MULTILINE | re.DOTALL

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            # method names
            (r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
             r'([a-zA-Z_]\w*)'                     # method name
             r'(\s*)(\()',                         # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            (r'[^\S\n]+', Whitespace),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/\*', Comment.Multiline, 'comment'),
            # Ceylon annotations (which are plain identifiers, not @-prefixed)
            (r'(shared|abstract|formal|default|actual|variable|deprecated|small|'
             r'late|literal|doc|by|see|throws|optional|license|tagged|final|native|'
             r'annotation|sealed)\b', Name.Decorator),
            (r'(break|case|catch|continue|else|finally|for|in|'
             r'if|return|switch|this|throw|try|while|is|exists|dynamic|'
             r'nonempty|then|outer|assert|let)\b', Keyword),
            (r'(abstracts|extends|satisfies|'
             r'super|given|of|out|assign)\b', Keyword.Declaration),
            (r'(function|value|void|new)\b',
             Keyword.Type),
            (r'(assembly|module|package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(class|interface|object|alias)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # character literal, incl. \{#XXXX} unicode escape form
            (r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
            (r'(\.)([a-z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_]\w*', Name),
            (r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
            # Numeric literals. Order matters: the underscore-grouped forms
            # must be tried before the plain forms, floats before integers.
            # Ceylon allows '_' digit grouping and magnitude suffixes
            # (k/M/G/T/P and m/u/n/p/f for fractional magnitudes).
            (r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
            (r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
             Number.Float),
            (r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
            (r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
             Number.Float),
            # '#' prefixes hexadecimal, '$' prefixes binary in Ceylon
            (r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
            (r'#[0-9a-fA-F]+', Number.Hex),
            (r'\$([01]{4})(_[01]{4})+', Number.Bin),
            (r'\$[01]+', Number.Bin),
            (r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
            (r'[0-9]+[kMGTP]?', Number.Integer),
            (r'\n', Whitespace)
        ],
        'class': [
            (r'[A-Za-z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'[a-z][\w.]*',
             Name.Namespace, '#pop')
        ],
        # nested multiline comments: '/*' pushes, '*/' pops
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
    }
+
+
class KotlinLexer(RegexLexer):
    """
    For Kotlin source code.

    .. versionadded:: 1.5
    """

    name = 'Kotlin'
    url = 'http://kotlinlang.org/'
    aliases = ['kotlin']
    filenames = ['*.kt', '*.kts']
    mimetypes = ['text/x-kotlin']

    flags = re.MULTILINE | re.DOTALL

    # Plain identifier: a letter-ish start char followed by letters, digits,
    # connector punctuation and combining marks (full Unicode categories).
    kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
               '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
                                 'Mn', 'Mc') + ']*')

    # Identifier as it may appear inside backticks, where spaces and many
    # symbol characters are also legal.
    kt_space_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                     '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
                                       'Mn', 'Mc', 'Zs')
                     + r'\'~!%^&*()+=|\[\]:;,.<>/\?-]*')

    # Either a plain identifier or a backtick-quoted one.
    kt_id = '(' + kt_name + '|`' + kt_space_name + '`)'

    modifiers = (r'actual|abstract|annotation|companion|const|crossinline|'
                 r'data|enum|expect|external|final|infix|inline|inner|'
                 r'internal|lateinit|noinline|open|operator|override|private|'
                 r'protected|public|sealed|suspend|tailrec|value')

    tokens = {
        'root': [
            # Whitespaces
            (r'[^\S\n]+', Whitespace),
            (r'\s+', Whitespace),
            (r'\\$', String.Escape), # line continuation
            (r'\n', Whitespace),
            # Comments
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'^(#!/.+?)(\n)', bygroups(Comment.Single, Whitespace)), # shebang for kotlin scripts
            (r'/[*].*?[*]/', Comment.Multiline),
            # Keywords
            (r'as\?', Keyword),  # safe cast must be tried before plain 'as'
            (r'(as|break|by|catch|constructor|continue|do|dynamic|else|finally|'
             r'get|for|if|init|[!]*in|[!]*is|out|reified|return|set|super|this|'
             r'throw|try|typealias|typeof|vararg|when|where|while)\b', Keyword),
            (r'it\b', Name.Builtin),
            # Built-in types (nullable forms first so the '?' is consumed)
            (words(('Boolean?', 'Byte?', 'Char?', 'Double?', 'Float?',
                    'Int?', 'Long?', 'Short?', 'String?', 'Any?', 'Unit?')), Keyword.Type),
            (words(('Boolean', 'Byte', 'Char', 'Double', 'Float',
                    'Int', 'Long', 'Short', 'String', 'Any', 'Unit'), suffix=r'\b'), Keyword.Type),
            # Constants
            (r'(true|false|null)\b', Keyword.Constant),
            # Imports
            (r'(package|import)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Namespace)),
            # Dot access
            (r'(\?\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
            (r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation, Name.Attribute)),
            # Annotations
            (r'@[^\W\d][\w.]*', Name.Decorator),
            # Labels
            (r'[^\W\d][\w.]+@', Name.Decorator),
            # Object expression
            (r'(object)(\s+)(:)(\s+)', bygroups(Keyword, Whitespace, Punctuation, Whitespace), 'class'),
            # Types
            (r'((?:(?:' + modifiers + r'|fun)\s+)*)(class|interface|object)(\s+)',
             bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'class'),
            # Variables
            (r'(var|val)(\s+)(\()', bygroups(Keyword.Declaration, Whitespace, Punctuation),
             'destructuring_assignment'),
            (r'((?:(?:' + modifiers + r')\s+)*)(var|val)(\s+)',
             bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'variable'),
            # Functions
            (r'((?:(?:' + modifiers + r')\s+)*)(fun)(\s+)',
             bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'function'),
            # Operators
            (r'::|!!|\?[:.]', Operator),
            (r'[~^*!%&\[\]<>|+=/?-]', Operator),
            # Punctuation
            (r'[{}();:.,]', Punctuation),
            # Strings
            (r'"""', String, 'multiline_string'),
            (r'"', String, 'string'),
            (r"'\\.'|'[^\\]'", String.Char),
            # Numbers
            # NOTE(review): the exponent group requires a sign ([eE][+-]),
            # so '1e5' is not matched as a single number — confirm intended.
            (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            # Identifiers
            (r'' + kt_id + r'((\?[^.])?)', Name) # additionally handle nullable types
        ],
        'class': [
            (kt_id, Name.Class, '#pop')
        ],
        'variable': [
            (kt_id, Name.Variable, '#pop')
        ],
        # component names inside 'val (a, b) = ...'
        'destructuring_assignment': [
            (r',', Punctuation),
            (r'\s+', Whitespace),
            (kt_id, Name.Variable),
            (r'(:)(\s+)(' + kt_id + ')', bygroups(Punctuation, Whitespace, Name)),
            (r'<', Operator, 'generic'),
            (r'\)', Punctuation, '#pop')
        ],
        # function name, possibly preceded by a receiver type and '.'
        'function': [
            (r'<', Operator, 'generic'),
            (r'' + kt_id + r'(\.)' + kt_id, bygroups(Name, Punctuation, Name.Function), '#pop'),
            (kt_id, Name.Function, '#pop')
        ],
        # generic parameter list between '<' and '>'
        'generic': [
            (r'(>)(\s*)', bygroups(Operator, Whitespace), '#pop'),
            (r':', Punctuation),
            (r'(reified|out|in)\b', Keyword),
            (r',', Punctuation),
            (r'\s+', Whitespace),
            (kt_id, Name)
        ],
        # used via using(this, state='modifiers') to recolor captured
        # modifier words as declarations
        'modifiers': [
            (r'\w+', Keyword.Declaration),
            (r'\s+', Whitespace),
            default('#pop')
        ],
        'string': [
            (r'"', String, '#pop'),
            include('string_common')
        ],
        'multiline_string': [
            (r'"""', String, '#pop'),
            (r'"', String),  # a lone quote inside """...""" is plain text
            include('string_common')
        ],
        # escapes and $-interpolation shared by both string kinds
        'string_common': [
            (r'\\\\', String), # escaped backslash
            (r'\\"', String), # escaped quote
            (r'\\', String), # bare backslash
            (r'\$\{', String.Interpol, 'interpolation'),
            (r'(\$)(\w+)', bygroups(String.Interpol, Name)),
            (r'[^\\"$]+', String)
        ],
        # ${ ... } inside a string: full expression syntax, nested braces
        # tracked via the 'scope' state
        'interpolation': [
            (r'"', String),
            (r'\$\{', String.Interpol, 'interpolation'),
            (r'\{', Punctuation, 'scope'),
            (r'\}', String.Interpol, '#pop'),
            include('root')
        ],
        'scope': [
            (r'\{', Punctuation, 'scope'),
            (r'\}', Punctuation, '#pop'),
            include('root')
        ]
    }
+
+
class XtendLexer(RegexLexer):
    """
    For Xtend source code.

    .. versionadded:: 1.6
    """

    name = 'Xtend'
    url = 'https://www.eclipse.org/xtend/'
    aliases = ['xtend']
    filenames = ['*.xtend']
    mimetypes = ['text/x-xtend']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # method names
            (r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
             r'([a-zA-Z_$][\w$]*)'                 # method name
             r'(\s*)(\()',                         # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            (r'[^\S\n]+', Whitespace),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
            # upper-case IF/FOR/... are the template-expression keywords
            (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
             r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
             r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
             Keyword),
            (r'(def|abstract|const|enum|extends|final|implements|native|private|'
             r'protected|public|static|strictfp|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Declaration),
            (r'(boolean|byte|char|double|float|int|long|short|void)\b',
             Keyword.Type),
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Whitespace),
             'class'),
            (r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
            # rich-string templates open with ''' or the guillemet U+00BB
            (r"(''')", String, 'template'),
            (r'(\u00BB)', String, 'template'),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Whitespace)
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
        # template body: closed by ''' or the guillemet U+00AB
        'template': [
            (r"'''", String, '#pop'),
            (r'\u00AB', String, '#pop'),
            (r'.', String)
        ],
    }
+
+
class PigLexer(RegexLexer):
    """
    For Pig Latin source code.

    .. versionadded:: 2.0
    """

    name = 'Pig'
    url = 'https://pig.apache.org/'
    aliases = ['pig']
    filenames = ['*.pig']
    mimetypes = ['text/x-pig']

    # Pig keywords are case-insensitive
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'--.*', Comment),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'\\$', String.Escape),  # line continuation
            (r'\\', Text),
            (r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
            include('keywords'),
            include('types'),
            include('builtins'),
            include('punct'),
            include('operators'),
            (r'[0-9]*\.[0-9]+(e[0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Whitespace),
            (r'([a-z_]\w*)(\s*)(\()',
             bygroups(Name.Function, Whitespace, Punctuation)),
            (r'[()#:]', Text),
            (r'[^(:#\'")\s]+', Text),
            (r'\S+\s+', Text) # TODO: make tests pass without \s+
        ],
        'keywords': [
            (r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
             r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
             r'eval|exex|explain|filter|flatten|foreach|full|generate|group|'
             r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
             r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
             r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
             r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
             r'stream|through|union|using|void)\b', Keyword)
        ],
        'builtins': [
            (r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
             r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
             r'TOKENIZE)\b', Name.Builtin)
        ],
        'types': [
            (r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
             r'int|long|tuple)\b', Keyword.Type)
        ],
        'punct': [
            (r'[;(){}\[\]]', Punctuation),
        ],
        'operators': [
            (r'[#=,./%+\-?]', Operator),
            (r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
            (r'(==|<=|<|>=|>|!=)', Operator),
        ],
    }
+
+
class GoloLexer(RegexLexer):
    """
    For Golo source code.

    .. versionadded:: 2.0
    """

    name = 'Golo'
    url = 'http://golo-lang.org/'
    filenames = ['*.golo']
    aliases = ['golo']

    tokens = {
        'root': [
            (r'[^\S\n]+', Whitespace),

            (r'#.*$', Comment),

            # Operators. (Fixed: the alternation previously listed '=' twice;
            # the second alternative was unreachable.)
            (r'(\^|\.\.\.|:|\?:|->|==|!=|=|\+|\*|%|/|<=|<|>=|>|\.)',
             Operator),
            # a single '-' only when not part of '--' (e.g. '----' doc fences)
            (r'(?<=[^-])(-)(?=[^-])', Operator),

            # word operators; the negative lookbehind keeps backtick-quoted
            # identifiers like `is` out of this rule
            (r'(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\b', Operator.Word),
            (r'[]{}|(),[]', Punctuation),

            (r'(module|import)(\s+)',
             bygroups(Keyword.Namespace, Whitespace),
             'modname'),
            (r'\b([a-zA-Z_][\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)),
            (r'\b([a-zA-Z_][\w$]*(?:\.[a-zA-Z_][\w$]*)+)\b', Name.Namespace),

            (r'(let|var)(\s+)',
             bygroups(Keyword.Declaration, Whitespace),
             'varname'),
            (r'(struct)(\s+)',
             bygroups(Keyword.Declaration, Whitespace),
             'structname'),
            (r'(function)(\s+)',
             bygroups(Keyword.Declaration, Whitespace),
             'funcname'),

            (r'(null|true|false)\b', Keyword.Constant),
            (r'(augment|pimp'
             r'|if|else|case|match|return'
             r'|case|when|then|otherwise'
             r'|while|for|foreach'
             r'|try|catch|finally|throw'
             r'|local'
             r'|continue|break)\b', Keyword),

            # collection literals: map[...], list[...], etc.
            (r'(map|array|list|set|vector|tuple)(\[)',
             bygroups(Name.Builtin, Punctuation)),
            (r'(print|println|readln|raise|fun'
             r'|asInterfaceInstance)\b', Name.Builtin),
            # function call: optionally backtick-quoted name before '('
            (r'(`?[a-zA-Z_][\w$]*)(\()',
             bygroups(Name.Function, Punctuation)),

            # numbers (underscore digit separators allowed)
            (r'-?[\d_]*\.[\d_]*([eE][+-]?\d[\d_]*)?F?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'-?\d[\d_]*L', Number.Integer.Long),
            (r'-?\d[\d_]*', Number.Integer),

            (r'`?[a-zA-Z_][\w$]*', Name),
            (r'@[a-zA-Z_][\w$.]*', Name.Decorator),

            # strings: the escape rules are combined with each string body
            (r'"""', String, combined('stringescape', 'triplestring')),
            (r'"', String, combined('stringescape', 'doublestring')),
            (r"'", String, combined('stringescape', 'singlestring')),
            # documentation blocks fenced by '----'
            (r'----((.|\n)*?)----', String.Doc)

        ],

        'funcname': [
            (r'`?[a-zA-Z_][\w$]*', Name.Function, '#pop'),
        ],
        'modname': [
            (r'[a-zA-Z_][\w$.]*\*?', Name.Namespace, '#pop')
        ],
        'structname': [
            (r'`?[\w.]+\*?', Name.Class, '#pop')
        ],
        'varname': [
            (r'`?[a-zA-Z_][\w$]*', Name.Variable, '#pop'),
        ],
        # shared body rules for all three string kinds
        'string': [
            (r'[^\\\'"\n]+', String),
            (r'[\'"\\]', String)
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'triplestring': [
            (r'"""', String, '#pop'),
            include('string'),
            (r'\n', String),
        ],
        'doublestring': [
            # NOTE(review): opened with token String but closed with
            # String.Double — kept as-is to preserve emitted token types.
            (r'"', String.Double, '#pop'),
            include('string'),
        ],
        'singlestring': [
            (r"'", String, '#pop'),
            include('string'),
        ],
        # (Removed: an 'operators' state copy-pasted verbatim from PigLexer.
        # No Golo state referenced it, so it was dead data.)
    }
+
+
+class JasminLexer(RegexLexer):
+ """
+ For Jasmin assembly code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Jasmin'
+ url = 'http://jasmin.sourceforge.net/'
+ aliases = ['jasmin', 'jasminxt']
+ filenames = ['*.j']
+
+ _whitespace = r' \n\t\r'
+ _ws = r'(?:[%s]+)' % _whitespace
+ _separator = r'%s:=' % _whitespace
+ _break = r'(?=[%s]|$)' % _separator
+ _name = r'[^%s]+' % _separator
+ _unqualified_name = r'(?:[^%s.;\[/]+)' % _separator
+
+ tokens = {
+ 'default': [
+ (r'\n', Whitespace, '#pop'),
+ (r"'", String.Single, ('#pop', 'quote')),
+ (r'"', String.Double, 'string'),
+ (r'=', Punctuation),
+ (r':', Punctuation, 'label'),
+ (_ws, Whitespace),
+ (r';.*', Comment.Single),
+ (r'(\$[-+])?0x-?[\da-fA-F]+%s' % _break, Number.Hex),
+ (r'(\$[-+]|\+)?-?\d+%s' % _break, Number.Integer),
+ (r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?[fFdD]?'
+ r'[\x00-\x08\x0b\x0c\x0e-\x1f]*%s' % _break, Number.Float),
+ (r'\$%s' % _name, Name.Variable),
+
+ # Directives
+ (r'\.annotation%s' % _break, Keyword.Reserved, 'annotation'),
+ (r'(\.attribute|\.bytecode|\.debug|\.deprecated|\.enclosing|'
+ r'\.interface|\.line|\.signature|\.source|\.stack|\.var|abstract|'
+ r'annotation|bridge|class|default|enum|field|final|fpstrict|'
+ r'interface|native|private|protected|public|signature|static|'
+ r'synchronized|synthetic|transient|varargs|volatile)%s' % _break,
+ Keyword.Reserved),
+ (r'\.catch%s' % _break, Keyword.Reserved, 'caught-exception'),
+ (r'(\.class|\.implements|\.inner|\.super|inner|invisible|'
+ r'invisibleparam|outer|visible|visibleparam)%s' % _break,
+ Keyword.Reserved, 'class/convert-dots'),
+ (r'\.field%s' % _break, Keyword.Reserved,
+ ('descriptor/convert-dots', 'field')),
+ (r'(\.end|\.limit|use)%s' % _break, Keyword.Reserved,
+ 'no-verification'),
+ (r'\.method%s' % _break, Keyword.Reserved, 'method'),
+ (r'\.set%s' % _break, Keyword.Reserved, 'var'),
+ (r'\.throws%s' % _break, Keyword.Reserved, 'exception'),
+ (r'(from|offset|to|using)%s' % _break, Keyword.Reserved, 'label'),
+ (r'is%s' % _break, Keyword.Reserved,
+ ('descriptor/convert-dots', 'var')),
+ (r'(locals|stack)%s' % _break, Keyword.Reserved, 'verification'),
+ (r'method%s' % _break, Keyword.Reserved, 'enclosing-method'),
+
+ # Instructions
+ (words((
+ 'aaload', 'aastore', 'aconst_null', 'aload', 'aload_0', 'aload_1', 'aload_2',
+ 'aload_3', 'aload_w', 'areturn', 'arraylength', 'astore', 'astore_0', 'astore_1',
+ 'astore_2', 'astore_3', 'astore_w', 'athrow', 'baload', 'bastore', 'bipush',
+ 'breakpoint', 'caload', 'castore', 'd2f', 'd2i', 'd2l', 'dadd', 'daload', 'dastore',
+ 'dcmpg', 'dcmpl', 'dconst_0', 'dconst_1', 'ddiv', 'dload', 'dload_0', 'dload_1',
+ 'dload_2', 'dload_3', 'dload_w', 'dmul', 'dneg', 'drem', 'dreturn', 'dstore', 'dstore_0',
+ 'dstore_1', 'dstore_2', 'dstore_3', 'dstore_w', 'dsub', 'dup', 'dup2', 'dup2_x1',
+ 'dup2_x2', 'dup_x1', 'dup_x2', 'f2d', 'f2i', 'f2l', 'fadd', 'faload', 'fastore', 'fcmpg',
+ 'fcmpl', 'fconst_0', 'fconst_1', 'fconst_2', 'fdiv', 'fload', 'fload_0', 'fload_1',
+ 'fload_2', 'fload_3', 'fload_w', 'fmul', 'fneg', 'frem', 'freturn', 'fstore', 'fstore_0',
+ 'fstore_1', 'fstore_2', 'fstore_3', 'fstore_w', 'fsub', 'i2b', 'i2c', 'i2d', 'i2f', 'i2l',
+ 'i2s', 'iadd', 'iaload', 'iand', 'iastore', 'iconst_0', 'iconst_1', 'iconst_2',
+ 'iconst_3', 'iconst_4', 'iconst_5', 'iconst_m1', 'idiv', 'iinc', 'iinc_w', 'iload',
+ 'iload_0', 'iload_1', 'iload_2', 'iload_3', 'iload_w', 'imul', 'ineg', 'int2byte',
+ 'int2char', 'int2short', 'ior', 'irem', 'ireturn', 'ishl', 'ishr', 'istore', 'istore_0',
+ 'istore_1', 'istore_2', 'istore_3', 'istore_w', 'isub', 'iushr', 'ixor', 'l2d', 'l2f',
+ 'l2i', 'ladd', 'laload', 'land', 'lastore', 'lcmp', 'lconst_0', 'lconst_1', 'ldc2_w',
+ 'ldiv', 'lload', 'lload_0', 'lload_1', 'lload_2', 'lload_3', 'lload_w', 'lmul', 'lneg',
+ 'lookupswitch', 'lor', 'lrem', 'lreturn', 'lshl', 'lshr', 'lstore', 'lstore_0',
+ 'lstore_1', 'lstore_2', 'lstore_3', 'lstore_w', 'lsub', 'lushr', 'lxor',
+ 'monitorenter', 'monitorexit', 'nop', 'pop', 'pop2', 'ret', 'ret_w', 'return', 'saload',
+ 'sastore', 'sipush', 'swap'), suffix=_break), Keyword.Reserved),
+ (r'(anewarray|checkcast|instanceof|ldc|ldc_w|new)%s' % _break,
+ Keyword.Reserved, 'class/no-dots'),
+ (r'invoke(dynamic|interface|nonvirtual|special|'
+ r'static|virtual)%s' % _break, Keyword.Reserved,
+ 'invocation'),
+ (r'(getfield|putfield)%s' % _break, Keyword.Reserved,
+ ('descriptor/no-dots', 'field')),
+ (r'(getstatic|putstatic)%s' % _break, Keyword.Reserved,
+ ('descriptor/no-dots', 'static')),
+ (words((
+ 'goto', 'goto_w', 'if_acmpeq', 'if_acmpne', 'if_icmpeq',
+ 'if_icmpge', 'if_icmpgt', 'if_icmple', 'if_icmplt', 'if_icmpne',
+ 'ifeq', 'ifge', 'ifgt', 'ifle', 'iflt', 'ifne', 'ifnonnull',
+ 'ifnull', 'jsr', 'jsr_w'), suffix=_break),
+ Keyword.Reserved, 'label'),
+ (r'(multianewarray|newarray)%s' % _break, Keyword.Reserved,
+ 'descriptor/convert-dots'),
+ (r'tableswitch%s' % _break, Keyword.Reserved, 'table')
+ ],
+ 'quote': [
+ (r"'", String.Single, '#pop'),
+ (r'\\u[\da-fA-F]{4}', String.Escape),
+ (r"[^'\\]+", String.Single)
+ ],
+ 'string': [
+ (r'"', String.Double, '#pop'),
+ (r'\\([nrtfb"\'\\]|u[\da-fA-F]{4}|[0-3]?[0-7]{1,2})',
+ String.Escape),
+ (r'[^"\\]+', String.Double)
+ ],
+ 'root': [
+ (r'\n+', Whitespace),
+ (r"'", String.Single, 'quote'),
+ include('default'),
+ (r'(%s)([ \t\r]*)(:)' % _name,
+ bygroups(Name.Label, Whitespace, Punctuation)),
+ (_name, String.Other)
+ ],
+ 'annotation': [
+ (r'\n', Whitespace, ('#pop', 'annotation-body')),
+ (r'default%s' % _break, Keyword.Reserved,
+ ('#pop', 'annotation-default')),
+ include('default')
+ ],
+ 'annotation-body': [
+ (r'\n+', Whitespace),
+ (r'\.end%s' % _break, Keyword.Reserved, '#pop'),
+ include('default'),
+ (_name, String.Other, ('annotation-items', 'descriptor/no-dots'))
+ ],
+ 'annotation-default': [
+ (r'\n+', Whitespace),
+ (r'\.end%s' % _break, Keyword.Reserved, '#pop'),
+ include('default'),
+ default(('annotation-items', 'descriptor/no-dots'))
+ ],
+ 'annotation-items': [
+ (r"'", String.Single, 'quote'),
+ include('default'),
+ (_name, String.Other)
+ ],
+ 'caught-exception': [
+ (r'all%s' % _break, Keyword, '#pop'),
+ include('exception')
+ ],
+ 'class/convert-dots': [
+ include('default'),
+ (r'(L)((?:%s[/.])*)(%s)(;)' % (_unqualified_name, _name),
+ bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
+ '#pop'),
+ (r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Class), '#pop')
+ ],
+ 'class/no-dots': [
+ include('default'),
+ (r'\[+', Punctuation, ('#pop', 'descriptor/no-dots')),
+ (r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
+ bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
+ '#pop'),
+ (r'((?:%s/)*)(%s)' % (_unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Class), '#pop')
+ ],
+ 'descriptor/convert-dots': [
+ include('default'),
+ (r'\[+', Punctuation),
+ (r'(L)((?:%s[/.])*)(%s?)(;)' % (_unqualified_name, _name),
+ bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
+ '#pop'),
+ (r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
+ default('#pop')
+ ],
+ 'descriptor/no-dots': [
+ include('default'),
+ (r'\[+', Punctuation),
+ (r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
+ bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
+ '#pop'),
+ (r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
+ default('#pop')
+ ],
+ 'descriptors/convert-dots': [
+ (r'\)', Punctuation, '#pop'),
+ default('descriptor/convert-dots')
+ ],
+ 'enclosing-method': [
+ (_ws, Whitespace),
+ (r'(?=[^%s]*\()' % _separator, Text, ('#pop', 'invocation')),
+ default(('#pop', 'class/convert-dots'))
+ ],
+ 'exception': [
+ include('default'),
+ (r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Exception), '#pop')
+ ],
+ 'field': [
+ (r'static%s' % _break, Keyword.Reserved, ('#pop', 'static')),
+ include('default'),
+ (r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
+ (_unqualified_name, _separator, _unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Class, Name.Variable.Instance),
+ '#pop')
+ ],
+ 'invocation': [
+ include('default'),
+ (r'((?:%s[/.](?=[^%s(]*[/.]))*)(%s[/.])?(%s)(\()' %
+ (_unqualified_name, _separator, _unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Class, Name.Function, Punctuation),
+ ('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
+ 'descriptor/convert-dots'))
+ ],
+ 'label': [
+ include('default'),
+ (_name, Name.Label, '#pop')
+ ],
+ 'method': [
+ include('default'),
+ (r'(%s)(\()' % _name, bygroups(Name.Function, Punctuation),
+ ('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
+ 'descriptor/convert-dots'))
+ ],
+ 'no-verification': [
+ (r'(locals|method|stack)%s' % _break, Keyword.Reserved, '#pop'),
+ include('default')
+ ],
+ 'static': [
+ include('default'),
+ (r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
+ (_unqualified_name, _separator, _unqualified_name, _name),
+ bygroups(Name.Namespace, Name.Class, Name.Variable.Class), '#pop')
+ ],
+ 'table': [
+ (r'\n+', Whitespace),
+ (r'default%s' % _break, Keyword.Reserved, '#pop'),
+ include('default'),
+ (_name, Name.Label)
+ ],
+ 'var': [
+ include('default'),
+ (_name, Name.Variable, '#pop')
+ ],
+ 'verification': [
+ include('default'),
+ (r'(Double|Float|Integer|Long|Null|Top|UninitializedThis)%s' %
+ _break, Keyword, '#pop'),
+ (r'Object%s' % _break, Keyword, ('#pop', 'class/no-dots')),
+ (r'Uninitialized%s' % _break, Keyword, ('#pop', 'label'))
+ ]
+ }
+
+ def analyse_text(text):
+ score = 0
+ if re.search(r'^\s*\.class\s', text, re.MULTILINE):
+ score += 0.5
+ if re.search(r'^\s*[a-z]+_[a-z]+\b', text, re.MULTILINE):
+ score += 0.3
+ if re.search(r'^\s*\.(attribute|bytecode|debug|deprecated|enclosing|'
+ r'inner|interface|limit|set|signature|stack)\b', text,
+ re.MULTILINE):
+ score += 0.6
+ return min(score, 1.0)
+
+
class SarlLexer(RegexLexer):
    """
    For SARL source code.

    The keyword set (``agent``, ``behavior``, ``capacity``, ``skill``,
    ``event``) reflects SARL's agent-oriented constructs alongside its
    Java-like statement and modifier keywords.

    .. versionadded:: 2.4
    """

    name = 'SARL'
    url = 'http://www.sarl.io'
    aliases = ['sarl']
    filenames = ['*.sarl']
    mimetypes = ['text/x-sarl']

    # DOTALL lets the /* ... */ rule span multiple lines.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # Method declarations: the return-type prefix is re-lexed
            # with this same lexer via using(this).
            (r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)'  # return arguments
             r'([a-zA-Z_$][\w$]*)'                  # method name
             r'(\s*)(\()',                          # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            # Intra-line whitespace (the newline rule comes last).
            (r'[^\S\n]+', Whitespace),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/\*.*?\*/', Comment.Multiline),
            # Annotations, e.g. @Pure.
            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
            # Statement / expression keywords.
            (r'(as|break|case|catch|default|do|else|extends|extension|finally|'
             r'fires|for|if|implements|instanceof|new|on|requires|return|super|'
             r'switch|throw|throws|try|typeof|uses|while|with)\b',
             Keyword),
            # Modifiers and declaration keywords.
            (r'(abstract|def|dispatch|final|native|override|private|protected|'
             r'public|static|strictfp|synchronized|transient|val|var|volatile)\b',
             Keyword.Declaration),
            # Primitive types.
            (r'(boolean|byte|char|double|float|int|long|short|void)\b',
             Keyword.Type),
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
            (r'(false|it|null|occurrence|this|true|void)\b', Keyword.Constant),
            # Top-level entity declarations; the following identifier is
            # consumed by the 'class' state.
            (r'(agent|annotation|artifact|behavior|capacity|class|enum|event|'
             r'interface|skill|space)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace),
             'import'),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Whitespace)
        ],
        # One identifier following an entity-declaration keyword.
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        # Dotted module path, optionally ending in a wildcard.
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
    }
diff --git a/pygments/lexers/kuin.py b/pygments/lexers/kuin.py
new file mode 100644
index 0000000..8dd7e70
--- /dev/null
+++ b/pygments/lexers/kuin.py
@@ -0,0 +1,333 @@
+"""
+ pygments.lexers.kuin
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Kuin language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, using, this, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['KuinLexer']
+
+
class KuinLexer(RegexLexer):
    """
    For Kuin source code.

    Kuin is line-oriented: ``|`` continues a line, ``;`` starts a line
    comment, and ``{ }`` delimit nestable block comments.  Block
    constructs (``func``, ``class``, ``if``, ``for``, ...) are closed by
    a matching ``end <keyword>``; the lexer tracks each construct with a
    pair of states — ``'xxx_'`` lexes the rest of the header line, then
    ``'xxx'`` lexes the body until the matching ``end``.

    .. versionadded:: 2.9
    """
    name = 'Kuin'
    url = 'https://github.com/kuina/Kuin'
    aliases = ['kuin']
    filenames = ['*.kn']

    tokens = {
        'root': [
            include('statement'),
        ],
        'statement': [
            # Whitespace / Comment
            include('whitespace'),

            # Block-statement
            (r'(\+?)([ \t]*)(\*?)([ \t]*)(\bfunc)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
                      using(this), Name.Function), 'func_'),
            (r'\b(class)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, using(this), Name.Class), 'class_'),
            (r'\b(enum)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, using(this), Name.Constant), 'enum_'),
            (r'\b(block)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'block_'),
            (r'\b(ifdef)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'ifdef_'),
            (r'\b(if)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'if_'),
            (r'\b(switch)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'switch_'),
            (r'\b(while)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'while_'),
            (r'\b(for)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'for_'),
            (r'\b(foreach)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'foreach_'),
            (r'\b(try)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
             bygroups(Keyword, using(this), Name.Other), 'try_'),

            # Line-statement
            (r'\b(do)\b', Keyword, 'do'),
            (r'(\+?[ \t]*\bvar)\b', Keyword, 'var'),
            (r'\b(const)\b', Keyword, 'const'),
            (r'\b(ret)\b', Keyword, 'ret'),
            (r'\b(throw)\b', Keyword, 'throw'),
            (r'\b(alias)\b', Keyword, 'alias'),
            (r'\b(assert)\b', Keyword, 'assert'),
            (r'\|', Text, 'continued_line'),
            (r'[ \t]*\n', Whitespace),
        ],

        # Whitespace / Comment
        'whitespace': [
            # Group 1 is the leading indentation, group 2 the ';' comment.
            # Fixed: the token types were swapped, tagging the indentation
            # as Comment.Single and the comment text as Whitespace.
            (r'^([ \t]*)(;.*)', bygroups(Whitespace, Comment.Single)),
            (r'[ \t]+(?![; \t])', Whitespace),
            (r'\{', Comment.Multiline, 'multiline_comment'),
        ],
        'multiline_comment': [
            # Block comments nest.
            (r'\{', Comment.Multiline, 'multiline_comment'),
            (r'(?:\s*;.*|[^{}\n]+)', Comment.Multiline),
            (r'\n', Comment.Multiline),
            (r'\}', Comment.Multiline, '#pop'),
        ],

        # Block-statement
        # The header state 'xxx_' pushes the body state 'xxx' at the
        # newline; 'end xxx' then pops both with '#pop:2'.
        'func_': [
            include('expr'),
            (r'\n', Whitespace, 'func'),
        ],
        'func': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(func)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
        ],
        'class_': [
            include('expr'),
            (r'\n', Whitespace, 'class'),
        ],
        'class': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(class)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
        ],
        'enum_': [
            include('expr'),
            (r'\n', Whitespace, 'enum'),
        ],
        'enum': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(enum)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('expr'),
            (r'\n', Whitespace),
        ],
        'block_': [
            include('expr'),
            (r'\n', Whitespace, 'block'),
        ],
        'block': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(block)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'ifdef_': [
            include('expr'),
            (r'\n', Whitespace, 'ifdef'),
        ],
        'ifdef': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(ifdef)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            (words(('rls', 'dbg'), prefix=r'\b', suffix=r'\b'),
             Keyword.Constant, 'ifdef_sp'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'ifdef_sp': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'if_': [
            include('expr'),
            (r'\n', Whitespace, 'if'),
        ],
        'if': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(if)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            (words(('elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword,
             'if_sp'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'if_sp': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'switch_': [
            include('expr'),
            (r'\n', Whitespace, 'switch'),
        ],
        'switch': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(switch)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            (words(('case', 'default', 'to'), prefix=r'\b', suffix=r'\b'),
             Keyword, 'switch_sp'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'switch_sp': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'while_': [
            include('expr'),
            (r'\n', Whitespace, 'while'),
        ],
        'while': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(while)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'for_': [
            include('expr'),
            (r'\n', Whitespace, 'for'),
        ],
        'for': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(for)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'foreach_': [
            include('expr'),
            (r'\n', Whitespace, 'foreach'),
        ],
        'foreach': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(foreach)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'try_': [
            include('expr'),
            (r'\n', Whitespace, 'try'),
        ],
        'try': [
            (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(try)\b',
             bygroups(Keyword, using(this), Keyword), '#pop:2'),
            (words(('catch', 'finally', 'to'), prefix=r'\b', suffix=r'\b'),
             Keyword, 'try_sp'),
            include('statement'),
            include('break'),
            include('skip'),
        ],
        'try_sp': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],

        # Line-statement
        'break': [
            (r'\b(break)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, using(this), Name.Other)),
        ],
        'skip': [
            (r'\b(skip)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, using(this), Name.Other)),
        ],
        'alias': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'assert': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'const': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'do': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'ret': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'throw': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'var': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],
        'continued_line': [
            include('expr'),
            (r'\n', Whitespace, '#pop'),
        ],

        'expr': [
            # Whitespace / Comment
            include('whitespace'),

            # Punctuation
            (r'\(', Punctuation),
            (r'\)', Punctuation),
            (r'\[', Punctuation),
            (r'\]', Punctuation),
            (r',', Punctuation),

            # Keyword
            (words((
                'true', 'false', 'null', 'inf'
            ), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
            # Fixed: the trailing comma is required — words() expects a
            # sequence of words, and ('me') is just the string 'me',
            # which would match the single letters 'm' and 'e'.
            (words((
                'me',
            ), prefix=r'\b', suffix=r'\b'), Keyword),
            (words((
                'bit16', 'bit32', 'bit64', 'bit8', 'bool',
                'char', 'class', 'dict', 'enum', 'float', 'func',
                'int', 'list', 'queue', 'stack'
            ), prefix=r'\b', suffix=r'\b'), Keyword.Type),

            # Number
            # Fixed: '(?:' was mistyped as '(:?', which also accepted a
            # stray colon before the exponent (e.g. "1.5:e+3").
            (r'\b[0-9]\.[0-9]+(?!\.)(?:e[\+-][0-9]+)?\b', Number.Float),
            (r'\b2#[01]+(?:b(?:8|16|32|64))?\b', Number.Bin),
            (r'\b8#[0-7]+(?:b(?:8|16|32|64))?\b', Number.Oct),
            (r'\b16#[0-9A-F]+(?:b(?:8|16|32|64))?\b', Number.Hex),
            # NOTE(review): Number.Decimal is not one of Pygments'
            # predefined tokens; styles will fall back to plain Number.
            (r'\b[0-9]+(?:b(?:8|16|32|64))?\b', Number.Decimal),

            # String / Char
            (r'"', String.Double, 'string'),
            (r"'(?:\\.|.)+?'", String.Char),

            # Operator
            (r'(?:\.|\$(?:>|<)?)', Operator),
            (r'(?:\^)', Operator),
            (r'(?:\+|-|!|##?)', Operator),
            (r'(?:\*|/|%)', Operator),
            (r'(?:~)', Operator),
            (r'(?:(?:=|<>)(?:&|\$)?|<=?|>=?)', Operator),
            (r'(?:&)', Operator),
            (r'(?:\|)', Operator),
            (r'(?:\?)', Operator),
            (r'(?::(?::|\+|-|\*|/|%|\^|~)?)', Operator),

            # Identifier
            (r"\b([a-zA-Z_][0-9a-zA-Z_]*)(?=@)\b", Name),
            (r"(@)?\b([a-zA-Z_][0-9a-zA-Z_]*)\b",
             bygroups(Name.Other, Name.Variable)),
        ],

        # String
        'string': [
            (r'(?:\\[^{\n]|[^"\\])+', String.Double),
            # \{ opens an embedded expression inside a string literal.
            (r'\\\{', String.Double, 'toStrInString'),
            (r'"', String.Double, '#pop'),
        ],
        'toStrInString': [
            include('expr'),
            (r'\}', String.Double, '#pop'),
        ],
    }
diff --git a/pygments/lexers/lilypond.py b/pygments/lexers/lilypond.py
new file mode 100644
index 0000000..52a007d
--- /dev/null
+++ b/pygments/lexers/lilypond.py
@@ -0,0 +1,226 @@
+"""
+ pygments.lexers.lilypond
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for LilyPond.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import bygroups, default, inherit, words
+from pygments.lexers.lisp import SchemeLexer
+from pygments.lexers._lilypond_builtins import (
+ keywords, pitch_language_names, clefs, scales, repeat_types, units,
+ chord_modifiers, pitches, music_functions, dynamics, articulations,
+ music_commands, markup_commands, grobs, translators, contexts,
+ context_properties, grob_properties, scheme_functions, paper_variables,
+ header_variables
+)
+from pygments.token import Token
+
+__all__ = ["LilyPondLexer"]
+
# In LilyPond, (unquoted) name tokens only contain letters, hyphens,
# and underscores, where hyphens and underscores must not start or end
# a name token.
#
# Note that many of the entities listed as LilyPond built-in keywords
# (in file `_lilypond_builtins.py`) are only valid if surrounded by
# double quotes, for example, 'hufnagel-fa1'. This means that
# `NAME_END_RE` doesn't apply to such entities in valid LilyPond code.
NAME_END_RE = r"(?=\d|[^\w\-]|[\-_][\W\d])"


def builtin_words(names, backslash, suffix=NAME_END_RE):
    """Build a ``words()`` matcher for a family of LilyPond builtins.

    *backslash* is one of ``"mandatory"``, ``"optional"`` or
    ``"disallowed"`` and controls whether a leading backslash must, may,
    or must not precede the name.  An optional direction specifier
    (``-``, ``_`` or ``^``) is always accepted in front.
    """
    backslash_res = {
        "mandatory": r"\\",
        "optional": r"\\?",
        "disallowed": "",
    }
    assert backslash in backslash_res
    return words(names, r"[\-_^]?" + backslash_res[backslash], suffix)
+
+
class LilyPondLexer(SchemeLexer):
    """
    Lexer for input to LilyPond, a text-based music typesetter.

    .. important::

       This lexer is meant to be used in conjunction with the ``lilypond`` style.

    .. versionadded:: 2.11
    """
    name = 'LilyPond'
    url = 'https://lilypond.org'
    aliases = ['lilypond']
    filenames = ['*.ly']
    mimetypes = []

    flags = re.DOTALL | re.MULTILINE

    # Because parsing LilyPond input is very tricky (and in fact
    # impossible without executing LilyPond when there is Scheme
    # code in the file), this lexer does not try to recognize
    # lexical modes. Instead, it catches the most frequent pieces
    # of syntax, and, above all, knows about many kinds of builtins.

    # In order to parse embedded Scheme, this lexer subclasses the SchemeLexer.
    # It redefines the 'root' state entirely, and adds a rule for #{ #}
    # to the 'value' state. The latter is used to parse a Scheme expression
    # after #.

    def get_tokens_unprocessed(self, text):
        """Highlight Scheme variables as LilyPond builtins when applicable.

        Post-processes the token stream produced by the inherited
        RegexLexer machinery: names found in ``scheme_functions`` are
        re-tagged as SchemeFunction, and generic Scheme builtins as
        SchemeBuiltin.
        """
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Token.Name.Function or token is Token.Name.Variable:
                if value in scheme_functions:
                    token = Token.Name.Builtin.SchemeFunction
            elif token is Token.Name.Builtin:
                token = Token.Name.Builtin.SchemeBuiltin
            yield index, token, value

    tokens = {
        "root": [
            # Whitespace.
            (r"\s+", Token.Text.Whitespace),

            # Multi-line comments. These are non-nestable.
            (r"%\{.*?%\}", Token.Comment.Multiline),

            # Simple comments.
            (r"%.*?$", Token.Comment.Single),

            # End of embedded LilyPond in Scheme.
            (r"#\}", Token.Punctuation, "#pop"),

            # Embedded Scheme, starting with # ("delayed"),
            # or $ (immediate). #@ and and $@ are the lesser known
            # "list splicing operators".
            (r"[#$]@?", Token.Punctuation, "value"),

            # Any kind of punctuation:
            # - sequential music: { },
            # - parallel music: << >>,
            # - voice separator: << \\ >>,
            # - chord: < >,
            # - bar check: |,
            # - dot in nested properties: \revert NoteHead.color,
            # - equals sign in assignments and lists for various commands:
            #   \override Stem.color = red,
            # - comma as alternative syntax for lists: \time 3,3,2 4/4,
            # - colon in tremolos: c:32,
            # - double hyphen and underscore in lyrics: li -- ly -- pond __
            #   (which must be preceded by ASCII whitespace)
            (r"""(?x)
                \\\\
                | (?<= \s ) (?: -- | __ )
                | [{}<>=.,:|]
            """, Token.Punctuation),

            # Pitches, with optional octavation marks, octave check,
            # and forced or cautionary accidental.
            (words(pitches, suffix=r"=?[',]*!?\??" + NAME_END_RE), Token.Pitch),

            # Strings, optionally with direction specifier.
            (r'[\-_^]?"', Token.String, "string"),

            # Numbers.
            (r"-?\d+\.\d+", Token.Number.Float),  # 5. and .5 are not allowed
            (r"-?\d+/\d+", Token.Number.Fraction),
            # Integers, or durations with optional augmentation dots.
            # We have no way to distinguish these, so we highlight
            # them all as numbers.
            #
            # Normally, there is a space before the integer (being an
            # argument to a music function), which we check here. The
            # case without a space is handled below (as a fingering
            # number).
            (r"""(?x)
                (?<= \s ) -\d+
                | (?: (?: \d+ | \\breve | \\longa | \\maxima )
                      \.* )
            """, Token.Number),
            # Separates duration and duration multiplier highlighted as fraction.
            (r"\*", Token.Number),

            # Ties, slurs, manual beams.
            (r"[~()[\]]", Token.Name.Builtin.Articulation),

            # Predefined articulation shortcuts. A direction specifier is
            # required here.
            (r"[\-_^][>^_!.\-+]", Token.Name.Builtin.Articulation),

            # Fingering numbers, string numbers.
            (r"[\-_^]?\\?\d+", Token.Name.Builtin.Articulation),

            # Builtins.
            (builtin_words(keywords, "mandatory"), Token.Keyword),
            (builtin_words(pitch_language_names, "disallowed"), Token.Name.PitchLanguage),
            (builtin_words(clefs, "disallowed"), Token.Name.Builtin.Clef),
            (builtin_words(scales, "mandatory"), Token.Name.Builtin.Scale),
            (builtin_words(repeat_types, "disallowed"), Token.Name.Builtin.RepeatType),
            (builtin_words(units, "mandatory"), Token.Number),
            (builtin_words(chord_modifiers, "disallowed"), Token.ChordModifier),
            (builtin_words(music_functions, "mandatory"), Token.Name.Builtin.MusicFunction),
            (builtin_words(dynamics, "mandatory"), Token.Name.Builtin.Dynamic),
            # Those like slurs that don't take a backslash are covered above.
            (builtin_words(articulations, "mandatory"), Token.Name.Builtin.Articulation),
            (builtin_words(music_commands, "mandatory"), Token.Name.Builtin.MusicCommand),
            (builtin_words(markup_commands, "mandatory"), Token.Name.Builtin.MarkupCommand),
            (builtin_words(grobs, "disallowed"), Token.Name.Builtin.Grob),
            (builtin_words(translators, "disallowed"), Token.Name.Builtin.Translator),
            # Optional backslash because of \layout { \context { \Score ... } }.
            (builtin_words(contexts, "optional"), Token.Name.Builtin.Context),
            (builtin_words(context_properties, "disallowed"), Token.Name.Builtin.ContextProperty),
            (builtin_words(grob_properties, "disallowed"),
             Token.Name.Builtin.GrobProperty,
             "maybe-subproperties"),
            # Optional backslashes here because output definitions are wrappers
            # around modules. Concretely, you can do, e.g.,
            # \paper { oddHeaderMarkup = \evenHeaderMarkup }
            (builtin_words(paper_variables, "optional"), Token.Name.Builtin.PaperVariable),
            (builtin_words(header_variables, "optional"), Token.Name.Builtin.HeaderVariable),

            # Other backslashed-escaped names (like dereferencing a
            # music variable), possibly with a direction specifier.
            (r"[\-_^]?\\.+?" + NAME_END_RE, Token.Name.BackslashReference),

            # Definition of a variable. Support assignments to alist keys
            # (myAlist.my-key.my-nested-key = \markup \spam \eggs).
            (r"""(?x)
                (?: [^\W\d] | - )+
                (?= (?: [^\W\d] | [\-.] )* \s* = )
            """, Token.Name.Lvalue),

            # Virtually everything can appear in markup mode, so we highlight
            # as text. Try to get a complete word, or we might wrongly lex
            # a suffix that happens to be a builtin as a builtin (e.g., "myStaff").
            (r"([^\W\d]|-)+?" + NAME_END_RE, Token.Text),
            (r".", Token.Text),
        ],
        "string": [
            (r'"', Token.String, "#pop"),
            (r'\\.', Token.String.Escape),
            (r'[^\\"]+', Token.String),
        ],
        "value": [
            # Scan a LilyPond value, then pop back since we had a
            # complete expression.
            (r"#\{", Token.Punctuation, ("#pop", "root")),
            # Inherit the SchemeLexer 'value' rules for everything else.
            inherit,
        ],
        # Grob subproperties are undeclared and it would be tedious
        # to maintain them by hand. Instead, this state allows recognizing
        # everything that looks like a-known-property.foo.bar-baz as
        # one single property name.
        "maybe-subproperties": [
            (r"\s+", Token.Text.Whitespace),
            (r"(\.)((?:[^\W\d]|-)+?)" + NAME_END_RE,
             bygroups(Token.Punctuation, Token.Name.Builtin.GrobProperty)),
            default("#pop"),
        ]
    }
diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py
new file mode 100644
index 0000000..2b91b99
--- /dev/null
+++ b/pygments/lexers/lisp.py
@@ -0,0 +1,2838 @@
+"""
+ pygments.lexers.lisp
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Lispy languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal, Error, Whitespace
+
+from pygments.lexers.python import PythonLexer
+
+from pygments.lexers._scheme_builtins import scheme_keywords, scheme_builtins
+
+__all__ = ['SchemeLexer', 'CommonLispLexer', 'HyLexer', 'RacketLexer',
+ 'NewLispLexer', 'EmacsLispLexer', 'ShenLexer', 'CPSALexer',
+ 'XtlangLexer', 'FennelLexer']
+
+
class SchemeLexer(RegexLexer):
    """
    A Scheme lexer.

    This parser is checked with pastes from the LISP pastebin
    at http://paste.lisp.org/ to cover as much syntax as possible.

    It supports the full Scheme syntax as defined in R5RS.

    .. versionadded:: 0.6
    """
    name = 'Scheme'
    url = 'http://www.scheme-reports.org/'
    aliases = ['scheme', 'scm']
    filenames = ['*.scm', '*.ss']
    mimetypes = ['text/x-scheme', 'application/x-scheme']

    flags = re.DOTALL | re.MULTILINE

    # valid names for identifiers
    # well, names can only not consist fully of numbers
    # but this should be good enough for now
    valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'

    # Use within verbose regexes
    token_end = r'''
      (?=
        \s        # whitespace
        | ;       # comment
        | \#[;|!] # fancy comments
        | [)\]]   # end delimiters
        | $       # end of file
      )
    '''

    # Recognizing builtins.
    def get_tokens_unprocessed(self, text):
        """Re-tag Name.Function/Name.Variable tokens that are Scheme
        keywords or builtins (from ``_scheme_builtins``)."""
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Name.Function or token is Name.Variable:
                if value in scheme_keywords:
                    yield index, Keyword, value
                elif value in scheme_builtins:
                    yield index, Name.Builtin, value
                else:
                    yield index, token, value
            else:
                yield index, token, value

    # Scheme has funky syntactic rules for numbers. These are all
    # valid number literals: 5.0e55|14, 14/13, -1+5j, +1@5, #b110,
    # #o#Iinf.0-nan.0i. This is adapted from the formal grammar given
    # in http://www.r6rs.org/final/r6rs.pdf, section 4.2.1. Take a
    # deep breath ...

    # It would be simpler if we could just not bother about invalid
    # numbers like #b35. But we cannot parse 'abcdef' without #x as a
    # number.

    # This class-level loop builds one verbose regex per radix; the
    # intermediate names (digit, radix, prefix, ...) are throwaway.
    number_rules = {}
    for base in (2, 8, 10, 16):
        if base == 2:
            digit = r'[01]'
            radix = r'( \#[bB] )'
        elif base == 8:
            digit = r'[0-7]'
            radix = r'( \#[oO] )'
        elif base == 10:
            digit = r'[0-9]'
            radix = r'( (\#[dD])? )'
        elif base == 16:
            digit = r'[0-9a-fA-F]'
            radix = r'( \#[xX] )'

        # Radix, optional exactness indicator.
        prefix = rf'''
          (
            {radix} (\#[iIeE])?
            | \#[iIeE] {radix}
          )
        '''

        # Simple unsigned number or fraction.
        ureal = rf'''
          (
            {digit}+
            ( / {digit}+ )?
          )
        '''

        # Add decimal numbers.
        if base == 10:
            decimal = r'''
              (
                # Decimal part
                (
                  [0-9]+ ([.][0-9]*)?
                  | [.][0-9]+
                )

                # Optional exponent
                (
                  [eEsSfFdDlL] [+-]? [0-9]+
                )?

                # Optional mantissa width
                (
                  \|[0-9]+
                )?
              )
            '''
            ureal = rf'''
              (
                {decimal} (?!/)
                | {ureal}
              )
            '''

        naninf = r'(nan.0|inf.0)'

        real = rf'''
          (
            [+-] {naninf} # Sign mandatory
            | [+-]? {ureal} # Sign optional
          )
        '''

        complex_ = rf'''
          (
            {real}? [+-] ({naninf}|{ureal})? i
            | {real} (@ {real})?

          )
        '''

        num = rf'''(?x)
          (
            {prefix}
            {complex_}
          )
          # Need to ensure we have a full token. 1+ is not a
          # number followed by something else, but a function
          # name.
          {token_end}
        '''

        number_rules[base] = num

    # If you have a headache now, say thanks to RnRS editors.

    # Doing it this way is simpler than splitting the number(10)
    # regex in a floating-point and a no-floating-point version.
    def decimal_cb(self, match):
        """Token callback: classify a base-10 match as Float or Integer."""
        if '.' in match.group():
            token_type = Number.Float  # includes [+-](inf|nan).0
        else:
            token_type = Number.Integer
        yield match.start(), token_type, match.group()

    # --

    # The 'scheme-root' state parses as many expressions as needed, always
    # delegating to the 'scheme-value' state. The latter parses one complete
    # expression and immediately pops back. This is needed for the LilyPondLexer.
    # When LilyPond encounters a #, it starts parsing embedded Scheme code, and
    # returns to normal syntax after one expression. We implement this
    # by letting the LilyPondLexer subclass the SchemeLexer. When it finds
    # the #, the LilyPondLexer goes to the 'value' state, which then pops back
    # to LilyPondLexer. The 'root' state of the SchemeLexer merely delegates the
    # work to 'scheme-root'; this is so that LilyPondLexer can inherit
    # 'scheme-root' and redefine 'root'.

    tokens = {
        'root': [
            default('scheme-root'),
        ],
        'scheme-root': [
            default('value'),
        ],
        'value': [
            # the comments
            # and going to the end of the line
            (r';.*?$', Comment.Single),
            # multi-line comment
            (r'#\|', Comment.Multiline, 'multiline-comment'),
            # commented form (entire sexpr following)
            (r'#;[([]', Comment, 'commented-form'),
            # commented datum
            (r'#;', Comment, 'commented-datum'),
            # signifies that the program text that follows is written with the
            # lexical and datum syntax described in r6rs
            (r'#!r6rs', Comment),

            # whitespaces - usually not relevant
            (r'\s+', Whitespace),

            # numbers
            (number_rules[2], Number.Bin, '#pop'),
            (number_rules[8], Number.Oct, '#pop'),
            (number_rules[10], decimal_cb, '#pop'),
            (number_rules[16], Number.Hex, '#pop'),

            # strings, symbols, keywords and characters
            (r'"', String, 'string'),
            (r"'" + valid_name, String.Symbol, "#pop"),
            (r'#:' + valid_name, Keyword.Declaration, '#pop'),
            (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char, "#pop"),

            # constants
            (r'(#t|#f)', Name.Constant, '#pop'),

            # special operators
            (r"('|#|`|,@|,|\.)", Operator),

            # first variable in a quoted string like
            # '(this is syntactic sugar)
            (r"(?<='\()" + valid_name, Name.Variable, '#pop'),
            (r"(?<=#\()" + valid_name, Name.Variable, '#pop'),

            # Functions -- note that this also catches variables
            # defined in let/let*, but there is little that can
            # be done about it.
            (r'(?<=\()' + valid_name, Name.Function, '#pop'),

            # find the remaining variables
            (valid_name, Name.Variable, '#pop'),

            # the famous parentheses!

            # Push scheme-root to enter a state that will parse as many things
            # as needed in the parentheses.
            (r'[([]', Punctuation, 'scheme-root'),
            # Pop one 'value', one 'scheme-root', and yet another 'value', so
            # we get back to a state parsing expressions as needed in the
            # enclosing context.
            (r'[)\]]', Punctuation, '#pop:3'),
        ],
        'multiline-comment': [
            # #| ... |# comments nest.
            (r'#\|', Comment.Multiline, '#push'),
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^|#]+', Comment.Multiline),
            (r'[|#]', Comment.Multiline),
        ],
        'commented-form': [
            (r'[([]', Comment, '#push'),
            (r'[)\]]', Comment, '#pop'),
            (r'[^()[\]]+', Comment),
        ],
        'commented-datum': [
            (rf'(?x).*?{token_end}', Comment, '#pop'),
        ],
        'string': [
            # Pops back from 'string', and pops 'value' as well.
            ('"', String, '#pop:2'),
            # Hex escape sequences, R6RS-style.
            (r'\\x[0-9a-fA-F]+;', String.Escape),
            # We try R6RS style first, but fall back to Guile-style.
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
            # Other special escape sequences implemented by Guile.
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{6}', String.Escape),
            # Escape sequences are not overly standardized. Recognizing
            # a single character after the backslash should be good enough.
            # NB: we have DOTALL.
            (r'\\.', String.Escape),
            # The rest
            (r'[^\\"]+', String),
        ]
    }
+
+
class CommonLispLexer(RegexLexer):
    """
    A Common Lisp lexer.

    .. versionadded:: 0.9
    """
    name = 'Common Lisp'
    url = 'https://lisp-lang.org/'
    aliases = ['common-lisp', 'cl', 'lisp']
    filenames = ['*.cl', '*.lisp']
    mimetypes = ['text/x-common-lisp']

    flags = re.IGNORECASE | re.MULTILINE

    # A couple of useful building-block regexes.

    # Characters that are not macro characters and may begin a symbol.
    nonmacro = r'\\.|[\w!$%&*+-/<=>?@\[\]^{}~]'
    # Characters that may continue a symbol.
    constituent = nonmacro + '|[#.:]'
    # Lookahead for whitespace or a terminating macro character.
    terminated = r'(?=[ "()\'\n,;`])'

    # The symbol token, reverse-engineered from the HyperSpec.
    # Take a deep breath...
    symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)

    def __init__(self, **options):
        # Imported lazily so the (large) builtin tables are only loaded
        # when a Common Lisp lexer is actually instantiated.
        from pygments.lexers._cl_builtins import BUILTIN_FUNCTIONS, \
            SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
            BUILTIN_TYPES, BUILTIN_CLASSES
        self.builtin_function = BUILTIN_FUNCTIONS
        self.special_forms = SPECIAL_FORMS
        self.macros = MACROS
        self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
        self.declarations = DECLARATIONS
        self.builtin_types = BUILTIN_TYPES
        self.builtin_classes = BUILTIN_CLASSES
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the token stream, re-tagging ``Name.Variable``
        tokens that name known builtins, special forms, macros, etc."""
        # Ordered: the first category containing the value wins, which
        # mirrors the precedence of the original if/elif cascade.
        categories = (
            (self.builtin_function, Name.Builtin),
            (self.special_forms, Keyword),
            (self.macros, Name.Builtin),
            (self.lambda_list_keywords, Keyword),
            (self.declarations, Keyword),
            (self.builtin_types, Keyword.Type),
            (self.builtin_classes, Name.Class),
        )
        stack = ['root']
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Variable:
                for members, replacement in categories:
                    if value in members:
                        token = replacement
                        break
            yield index, token, value

    tokens = {
        'root': [
            default('body'),
        ],
        'multiline-comment': [
            # Nested #| ... |# comments (cf. Hyperspec 2.4.8.19).
            (r'#\|', Comment.Multiline, '#push'),
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^|#]+', Comment.Multiline),
            (r'[|#]', Comment.Multiline),
        ],
        'commented-form': [
            # Balanced parens inside a #+nil-style commented-out form.
            (r'\(', Comment.Preproc, '#push'),
            (r'\)', Comment.Preproc, '#pop'),
            (r'[^()]+', Comment.Preproc),
        ],
        'body': [
            # whitespace
            (r'\s+', Whitespace),

            # single-line comment
            (r';.*$', Comment.Single),

            # multi-line comment
            (r'#\|', Comment.Multiline, 'multiline-comment'),

            # encoding comment (?)
            (r'#\d*Y.*$', Comment.Special),

            # strings and characters
            (r'"(\\.|\\\n|[^"\\])*"', String),
            # quoting
            (r":" + symbol, String.Symbol),
            (r"::" + symbol, String.Symbol),
            (r":#" + symbol, String.Symbol),
            (r"'" + symbol, String.Symbol),
            (r"'", Operator),
            (r"`", Operator),

            # decimal numbers
            (r'[-+]?\d+\.?' + terminated, Number.Integer),
            (r'[-+]?\d+/\d+' + terminated, Number),
            (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
             terminated, Number.Float),

            # sharpsign strings and characters
            (r"#\\." + terminated, String.Char),
            (r"#\\" + symbol, String.Char),

            # vector
            (r'#\(', Operator, 'body'),

            # bitstring
            (r'#\d*\*[01]*', Literal.Other),

            # uninterned symbol
            (r'#:' + symbol, String.Symbol),

            # read-time and load-time evaluation
            (r'#[.,]', Operator),

            # function shorthand
            (r'#\'', Name.Function),

            # binary rational
            (r'#b[+-]?[01]+(/[01]+)?', Number.Bin),

            # octal rational
            (r'#o[+-]?[0-7]+(/[0-7]+)?', Number.Oct),

            # hex rational
            (r'#x[+-]?[0-9a-f]+(/[0-9a-f]+)?', Number.Hex),

            # radix rational
            (r'#\d+r[+-]?[0-9a-z]+(/[0-9a-z]+)?', Number),

            # complex
            (r'(#c)(\()', bygroups(Number, Punctuation), 'body'),

            # array
            (r'(#\d+a)(\()', bygroups(Literal.Other, Punctuation), 'body'),

            # structure
            (r'(#s)(\()', bygroups(Literal.Other, Punctuation), 'body'),

            # path
            (r'#p?"(\\.|[^"])*"', Literal.Other),

            # reference
            (r'#\d+=', Operator),
            (r'#\d+#', Operator),

            # read-time comment
            (r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),

            # read-time conditional
            (r'#[+-]', Operator),

            # special operators that should have been parsed already
            (r'(,@|,|\.)', Operator),

            # special constants
            (r'(t|nil)' + terminated, Name.Constant),

            # functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
            (symbol, Name.Variable),

            # parentheses
            (r'\(', Punctuation, 'body'),
            (r'\)', Punctuation, '#pop'),
        ],
    }
+
+
class HyLexer(RegexLexer):
    """
    Lexer for Hy source code.

    .. versionadded:: 2.0
    """
    name = 'Hy'
    url = 'http://hylang.org/'
    aliases = ['hylang']
    filenames = ['*.hy']
    mimetypes = ['text/x-hy', 'application/x-hy']

    special_forms = (
        'cond', 'for', '->', '->>', 'car',
        'cdr', 'first', 'rest', 'let', 'when', 'unless',
        'import', 'do', 'progn', 'get', 'slice', 'assoc', 'with-decorator',
        ',', 'list_comp', 'kwapply', '~', 'is', 'in', 'is-not', 'not-in',
        'quasiquote', 'unquote', 'unquote-splice', 'quote', '|', '<<=', '>>=',
        'foreach', 'while',
        'eval-and-compile', 'eval-when-compile'
    )

    declarations = (
        'def', 'defn', 'defun', 'defmacro', 'defclass', 'lambda', 'fn', 'setv'
    )

    hy_builtins = ()

    hy_core = (
        'cycle', 'dec', 'distinct', 'drop', 'even?', 'filter', 'inc',
        'instance?', 'iterable?', 'iterate', 'iterator?', 'neg?',
        'none?', 'nth', 'numeric?', 'odd?', 'pos?', 'remove', 'repeat',
        'repeatedly', 'take', 'take_nth', 'take_while', 'zero?'
    )

    builtins = hy_builtins + hy_core

    # Valid identifier names.  Strictly speaking a name merely may not
    # consist entirely of digits, but this approximation is good enough.
    valid_name = r'(?!#)[\w!$%*+<=>?/.#:-]+'

    def _multi_escape(entries):
        # Helper used at class-body time to build a word-list regex that
        # requires a trailing space after each entry.
        return words(entries, suffix=' ')

    tokens = {
        'root': [
            # Comments: a semicolon through the end of the line.
            (r';.*$', Comment.Single),

            # Whitespace (commas count as whitespace in Hy).
            (r',+', Text),
            (r'\s+', Whitespace),

            # Numeric literals.
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),

            # Strings, symbols, characters and docstrings.
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"'" + valid_name, String.Symbol),
            (r"\\(.|[a-z]+)", String.Char),
            (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),

            # Keywords (e.g. :foo / ::foo).
            (r'::?' + valid_name, String.Symbol),

            # Special operators.
            (r'~@|[`\'#^~&@]', Operator),

            include('py-keywords'),
            include('py-builtins'),

            # Special forms get highlighted as keywords.
            (_multi_escape(special_forms), Keyword),

            # Technically, only the special forms are 'keywords'. The problem
            # is that only treating them as keywords means that things like
            # 'defn' and 'ns' need to be highlighted as builtins. This is ugly
            # and weird for most styles. So, as a compromise we're going to
            # highlight them as Keyword.Declarations.
            (_multi_escape(declarations), Keyword.Declaration),

            # Builtins.
            (_multi_escape(builtins), Name.Builtin),

            # Anything else in call position is a function name...
            (r'(?<=\()' + valid_name, Name.Function),

            # ...and the remaining names are variables.
            (valid_name, Name.Variable),

            # Vector, map, and the famous parentheses notation.
            (r'(\[|\])', Punctuation),
            (r'(\{|\})', Punctuation),
            (r'(\(|\))', Punctuation),
        ],
        # Reuse Python's keyword and builtin rules wholesale.
        'py-keywords': PythonLexer.tokens['keywords'],
        'py-builtins': PythonLexer.tokens['builtins'],
    }

    def analyse_text(text):
        # Typical Hy top-level forms are a strong hint.
        if any(marker in text for marker in ('(import ', '(defn ')):
            return 0.9
+
+
+class RacketLexer(RegexLexer):
+ """
+ Lexer for Racket source code (formerly
+ known as PLT Scheme).
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'Racket'
+ url = 'http://racket-lang.org/'
+ aliases = ['racket', 'rkt']
+ filenames = ['*.rkt', '*.rktd', '*.rktl']
+ mimetypes = ['text/x-racket', 'application/x-racket']
+
+ # Generated by example.rkt
+ _keywords = (
+ '#%app', '#%datum', '#%declare', '#%expression', '#%module-begin',
+ '#%plain-app', '#%plain-lambda', '#%plain-module-begin',
+ '#%printing-module-begin', '#%provide', '#%require',
+ '#%stratified-body', '#%top', '#%top-interaction',
+ '#%variable-reference', '->', '->*', '->*m', '->d', '->dm', '->i',
+ '->m', '...', ':do-in', '==', '=>', '_', 'absent', 'abstract',
+ 'all-defined-out', 'all-from-out', 'and', 'any', 'augment', 'augment*',
+ 'augment-final', 'augment-final*', 'augride', 'augride*', 'begin',
+ 'begin-for-syntax', 'begin0', 'case', 'case->', 'case->m',
+ 'case-lambda', 'class', 'class*', 'class-field-accessor',
+ 'class-field-mutator', 'class/c', 'class/derived', 'combine-in',
+ 'combine-out', 'command-line', 'compound-unit', 'compound-unit/infer',
+ 'cond', 'cons/dc', 'contract', 'contract-out', 'contract-struct',
+ 'contracted', 'define', 'define-compound-unit',
+ 'define-compound-unit/infer', 'define-contract-struct',
+ 'define-custom-hash-types', 'define-custom-set-types',
+ 'define-for-syntax', 'define-local-member-name', 'define-logger',
+ 'define-match-expander', 'define-member-name',
+ 'define-module-boundary-contract', 'define-namespace-anchor',
+ 'define-opt/c', 'define-sequence-syntax', 'define-serializable-class',
+ 'define-serializable-class*', 'define-signature',
+ 'define-signature-form', 'define-struct', 'define-struct/contract',
+ 'define-struct/derived', 'define-syntax', 'define-syntax-rule',
+ 'define-syntaxes', 'define-unit', 'define-unit-binding',
+ 'define-unit-from-context', 'define-unit/contract',
+ 'define-unit/new-import-export', 'define-unit/s', 'define-values',
+ 'define-values-for-export', 'define-values-for-syntax',
+ 'define-values/invoke-unit', 'define-values/invoke-unit/infer',
+ 'define/augment', 'define/augment-final', 'define/augride',
+ 'define/contract', 'define/final-prop', 'define/match',
+ 'define/overment', 'define/override', 'define/override-final',
+ 'define/private', 'define/public', 'define/public-final',
+ 'define/pubment', 'define/subexpression-pos-prop',
+ 'define/subexpression-pos-prop/name', 'delay', 'delay/idle',
+ 'delay/name', 'delay/strict', 'delay/sync', 'delay/thread', 'do',
+ 'else', 'except', 'except-in', 'except-out', 'export', 'extends',
+ 'failure-cont', 'false', 'false/c', 'field', 'field-bound?', 'file',
+ 'flat-murec-contract', 'flat-rec-contract', 'for', 'for*', 'for*/and',
+ 'for*/async', 'for*/first', 'for*/fold', 'for*/fold/derived',
+ 'for*/hash', 'for*/hasheq', 'for*/hasheqv', 'for*/last', 'for*/list',
+ 'for*/lists', 'for*/mutable-set', 'for*/mutable-seteq',
+ 'for*/mutable-seteqv', 'for*/or', 'for*/product', 'for*/set',
+ 'for*/seteq', 'for*/seteqv', 'for*/stream', 'for*/sum', 'for*/vector',
+ 'for*/weak-set', 'for*/weak-seteq', 'for*/weak-seteqv', 'for-label',
+ 'for-meta', 'for-syntax', 'for-template', 'for/and', 'for/async',
+ 'for/first', 'for/fold', 'for/fold/derived', 'for/hash', 'for/hasheq',
+ 'for/hasheqv', 'for/last', 'for/list', 'for/lists', 'for/mutable-set',
+ 'for/mutable-seteq', 'for/mutable-seteqv', 'for/or', 'for/product',
+ 'for/set', 'for/seteq', 'for/seteqv', 'for/stream', 'for/sum',
+ 'for/vector', 'for/weak-set', 'for/weak-seteq', 'for/weak-seteqv',
+ 'gen:custom-write', 'gen:dict', 'gen:equal+hash', 'gen:set',
+ 'gen:stream', 'generic', 'get-field', 'hash/dc', 'if', 'implies',
+ 'import', 'include', 'include-at/relative-to',
+ 'include-at/relative-to/reader', 'include/reader', 'inherit',
+ 'inherit-field', 'inherit/inner', 'inherit/super', 'init',
+ 'init-depend', 'init-field', 'init-rest', 'inner', 'inspect',
+ 'instantiate', 'interface', 'interface*', 'invariant-assertion',
+ 'invoke-unit', 'invoke-unit/infer', 'lambda', 'lazy', 'let', 'let*',
+ 'let*-values', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc',
+ 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes',
+ 'letrec-syntaxes+values', 'letrec-values', 'lib', 'link', 'local',
+ 'local-require', 'log-debug', 'log-error', 'log-fatal', 'log-info',
+ 'log-warning', 'match', 'match*', 'match*/derived', 'match-define',
+ 'match-define-values', 'match-lambda', 'match-lambda*',
+ 'match-lambda**', 'match-let', 'match-let*', 'match-let*-values',
+ 'match-let-values', 'match-letrec', 'match-letrec-values',
+ 'match/derived', 'match/values', 'member-name-key', 'mixin', 'module',
+ 'module*', 'module+', 'nand', 'new', 'nor', 'object-contract',
+ 'object/c', 'only', 'only-in', 'only-meta-in', 'open', 'opt/c', 'or',
+ 'overment', 'overment*', 'override', 'override*', 'override-final',
+ 'override-final*', 'parameterize', 'parameterize*',
+ 'parameterize-break', 'parametric->/c', 'place', 'place*',
+ 'place/context', 'planet', 'prefix', 'prefix-in', 'prefix-out',
+ 'private', 'private*', 'prompt-tag/c', 'protect-out', 'provide',
+ 'provide-signature-elements', 'provide/contract', 'public', 'public*',
+ 'public-final', 'public-final*', 'pubment', 'pubment*', 'quasiquote',
+ 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax',
+ 'quote-syntax/prune', 'recontract-out', 'recursive-contract',
+ 'relative-in', 'rename', 'rename-in', 'rename-inner', 'rename-out',
+ 'rename-super', 'require', 'send', 'send*', 'send+', 'send-generic',
+ 'send/apply', 'send/keyword-apply', 'set!', 'set!-values',
+ 'set-field!', 'shared', 'stream', 'stream*', 'stream-cons', 'struct',
+ 'struct*', 'struct-copy', 'struct-field-index', 'struct-out',
+ 'struct/c', 'struct/ctc', 'struct/dc', 'submod', 'super',
+ 'super-instantiate', 'super-make-object', 'super-new', 'syntax',
+ 'syntax-case', 'syntax-case*', 'syntax-id-rules', 'syntax-rules',
+ 'syntax/loc', 'tag', 'this', 'this%', 'thunk', 'thunk*', 'time',
+ 'unconstrained-domain->', 'unit', 'unit-from-context', 'unit/c',
+ 'unit/new-import-export', 'unit/s', 'unless', 'unquote',
+ 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', 'values/drop',
+ 'when', 'with-continuation-mark', 'with-contract',
+ 'with-contract-continuation-mark', 'with-handlers', 'with-handlers*',
+ 'with-method', 'with-syntax', 'λ'
+ )
+
+ # Generated by example.rkt
+ _builtins = (
+ '*', '*list/c', '+', '-', '/', '<', '</c', '<=', '<=/c', '=', '=/c',
+ '>', '>/c', '>=', '>=/c', 'abort-current-continuation', 'abs',
+ 'absolute-path?', 'acos', 'add-between', 'add1', 'alarm-evt',
+ 'always-evt', 'and/c', 'andmap', 'angle', 'any/c', 'append', 'append*',
+ 'append-map', 'apply', 'argmax', 'argmin', 'arithmetic-shift',
+ 'arity-at-least', 'arity-at-least-value', 'arity-at-least?',
+ 'arity-checking-wrapper', 'arity-includes?', 'arity=?',
+ 'arrow-contract-info', 'arrow-contract-info-accepts-arglist',
+ 'arrow-contract-info-chaperone-procedure',
+ 'arrow-contract-info-check-first-order', 'arrow-contract-info?',
+ 'asin', 'assf', 'assoc', 'assq', 'assv', 'atan',
+ 'bad-number-of-results', 'banner', 'base->-doms/c', 'base->-rngs/c',
+ 'base->?', 'between/c', 'bitwise-and', 'bitwise-bit-field',
+ 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor',
+ 'blame-add-car-context', 'blame-add-cdr-context', 'blame-add-context',
+ 'blame-add-missing-party', 'blame-add-nth-arg-context',
+ 'blame-add-range-context', 'blame-add-unknown-context',
+ 'blame-context', 'blame-contract', 'blame-fmt->-string',
+ 'blame-missing-party?', 'blame-negative', 'blame-original?',
+ 'blame-positive', 'blame-replace-negative', 'blame-source',
+ 'blame-swap', 'blame-swapped?', 'blame-update', 'blame-value',
+ 'blame?', 'boolean=?', 'boolean?', 'bound-identifier=?', 'box',
+ 'box-cas!', 'box-immutable', 'box-immutable/c', 'box/c', 'box?',
+ 'break-enabled', 'break-parameterization?', 'break-thread',
+ 'build-chaperone-contract-property', 'build-compound-type-name',
+ 'build-contract-property', 'build-flat-contract-property',
+ 'build-list', 'build-path', 'build-path/convention-type',
+ 'build-string', 'build-vector', 'byte-pregexp', 'byte-pregexp?',
+ 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes',
+ 'bytes->immutable-bytes', 'bytes->list', 'bytes->path',
+ 'bytes->path-element', 'bytes->string/latin-1', 'bytes->string/locale',
+ 'bytes->string/utf-8', 'bytes-append', 'bytes-append*',
+ 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end',
+ 'bytes-converter?', 'bytes-copy', 'bytes-copy!',
+ 'bytes-environment-variable-name?', 'bytes-fill!', 'bytes-join',
+ 'bytes-length', 'bytes-no-nuls?', 'bytes-open-converter', 'bytes-ref',
+ 'bytes-set!', 'bytes-utf-8-index', 'bytes-utf-8-length',
+ 'bytes-utf-8-ref', 'bytes<?', 'bytes=?', 'bytes>?', 'bytes?', 'caaaar',
+ 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar',
+ 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr',
+ 'call-in-nested-thread', 'call-with-atomic-output-file',
+ 'call-with-break-parameterization',
+ 'call-with-composable-continuation', 'call-with-continuation-barrier',
+ 'call-with-continuation-prompt', 'call-with-current-continuation',
+ 'call-with-default-reading-parameterization',
+ 'call-with-escape-continuation', 'call-with-exception-handler',
+ 'call-with-file-lock/timeout', 'call-with-immediate-continuation-mark',
+ 'call-with-input-bytes', 'call-with-input-file',
+ 'call-with-input-file*', 'call-with-input-string',
+ 'call-with-output-bytes', 'call-with-output-file',
+ 'call-with-output-file*', 'call-with-output-string',
+ 'call-with-parameterization', 'call-with-semaphore',
+ 'call-with-semaphore/enable-break', 'call-with-values', 'call/cc',
+ 'call/ec', 'car', 'cartesian-product', 'cdaaar', 'cdaadr', 'cdaar',
+ 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar',
+ 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'channel-get',
+ 'channel-put', 'channel-put-evt', 'channel-put-evt?',
+ 'channel-try-get', 'channel/c', 'channel?', 'chaperone-box',
+ 'chaperone-channel', 'chaperone-continuation-mark-key',
+ 'chaperone-contract-property?', 'chaperone-contract?', 'chaperone-evt',
+ 'chaperone-hash', 'chaperone-hash-set', 'chaperone-of?',
+ 'chaperone-procedure', 'chaperone-procedure*', 'chaperone-prompt-tag',
+ 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector',
+ 'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?',
+ 'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?',
+ 'char-downcase', 'char-foldcase', 'char-general-category',
+ 'char-graphic?', 'char-in', 'char-in/c', 'char-iso-control?',
+ 'char-lower-case?', 'char-numeric?', 'char-punctuation?',
+ 'char-ready?', 'char-symbolic?', 'char-title-case?', 'char-titlecase',
+ 'char-upcase', 'char-upper-case?', 'char-utf-8-length',
+ 'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?',
+ 'char?', 'check-duplicate-identifier', 'check-duplicates',
+ 'checked-procedure-check-and-extract', 'choice-evt',
+ 'class->interface', 'class-info', 'class-seal', 'class-unseal',
+ 'class?', 'cleanse-path', 'close-input-port', 'close-output-port',
+ 'coerce-chaperone-contract', 'coerce-chaperone-contracts',
+ 'coerce-contract', 'coerce-contract/f', 'coerce-contracts',
+ 'coerce-flat-contract', 'coerce-flat-contracts', 'collect-garbage',
+ 'collection-file-path', 'collection-path', 'combinations', 'compile',
+ 'compile-allow-set!-undefined', 'compile-context-preservation-enabled',
+ 'compile-enforce-module-constants', 'compile-syntax',
+ 'compiled-expression-recompile', 'compiled-expression?',
+ 'compiled-module-expression?', 'complete-path?', 'complex?', 'compose',
+ 'compose1', 'conjoin', 'conjugate', 'cons', 'cons/c', 'cons?', 'const',
+ 'continuation-mark-key/c', 'continuation-mark-key?',
+ 'continuation-mark-set->context', 'continuation-mark-set->list',
+ 'continuation-mark-set->list*', 'continuation-mark-set-first',
+ 'continuation-mark-set?', 'continuation-marks',
+ 'continuation-prompt-available?', 'continuation-prompt-tag?',
+ 'continuation?', 'contract-continuation-mark-key',
+ 'contract-custom-write-property-proc', 'contract-exercise',
+ 'contract-first-order', 'contract-first-order-passes?',
+ 'contract-late-neg-projection', 'contract-name', 'contract-proc',
+ 'contract-projection', 'contract-property?',
+ 'contract-random-generate', 'contract-random-generate-fail',
+ 'contract-random-generate-fail?',
+ 'contract-random-generate-get-current-environment',
+ 'contract-random-generate-stash', 'contract-random-generate/choose',
+ 'contract-stronger?', 'contract-struct-exercise',
+ 'contract-struct-generate', 'contract-struct-late-neg-projection',
+ 'contract-struct-list-contract?', 'contract-val-first-projection',
+ 'contract?', 'convert-stream', 'copy-directory/files', 'copy-file',
+ 'copy-port', 'cos', 'cosh', 'count', 'current-blame-format',
+ 'current-break-parameterization', 'current-code-inspector',
+ 'current-command-line-arguments', 'current-compile',
+ 'current-compiled-file-roots', 'current-continuation-marks',
+ 'current-contract-region', 'current-custodian', 'current-directory',
+ 'current-directory-for-user', 'current-drive',
+ 'current-environment-variables', 'current-error-port', 'current-eval',
+ 'current-evt-pseudo-random-generator',
+ 'current-force-delete-permissions', 'current-future',
+ 'current-gc-milliseconds', 'current-get-interaction-input-port',
+ 'current-inexact-milliseconds', 'current-input-port',
+ 'current-inspector', 'current-library-collection-links',
+ 'current-library-collection-paths', 'current-load',
+ 'current-load-extension', 'current-load-relative-directory',
+ 'current-load/use-compiled', 'current-locale', 'current-logger',
+ 'current-memory-use', 'current-milliseconds',
+ 'current-module-declare-name', 'current-module-declare-source',
+ 'current-module-name-resolver', 'current-module-path-for-load',
+ 'current-namespace', 'current-output-port', 'current-parameterization',
+ 'current-plumber', 'current-preserved-thread-cell-values',
+ 'current-print', 'current-process-milliseconds', 'current-prompt-read',
+ 'current-pseudo-random-generator', 'current-read-interaction',
+ 'current-reader-guard', 'current-readtable', 'current-seconds',
+ 'current-security-guard', 'current-subprocess-custodian-mode',
+ 'current-thread', 'current-thread-group',
+ 'current-thread-initial-stack-size',
+ 'current-write-relative-directory', 'curry', 'curryr',
+ 'custodian-box-value', 'custodian-box?', 'custodian-limit-memory',
+ 'custodian-managed-list', 'custodian-memory-accounting-available?',
+ 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?',
+ 'custom-print-quotable-accessor', 'custom-print-quotable?',
+ 'custom-write-accessor', 'custom-write-property-proc', 'custom-write?',
+ 'date', 'date*', 'date*-nanosecond', 'date*-time-zone-name', 'date*?',
+ 'date-day', 'date-dst?', 'date-hour', 'date-minute', 'date-month',
+ 'date-second', 'date-time-zone-offset', 'date-week-day', 'date-year',
+ 'date-year-day', 'date?', 'datum->syntax', 'datum-intern-literal',
+ 'default-continuation-prompt-tag', 'degrees->radians',
+ 'delete-directory', 'delete-directory/files', 'delete-file',
+ 'denominator', 'dict->list', 'dict-can-functional-set?',
+ 'dict-can-remove-keys?', 'dict-clear', 'dict-clear!', 'dict-copy',
+ 'dict-count', 'dict-empty?', 'dict-for-each', 'dict-has-key?',
+ 'dict-implements/c', 'dict-implements?', 'dict-iter-contract',
+ 'dict-iterate-first', 'dict-iterate-key', 'dict-iterate-next',
+ 'dict-iterate-value', 'dict-key-contract', 'dict-keys', 'dict-map',
+ 'dict-mutable?', 'dict-ref', 'dict-ref!', 'dict-remove',
+ 'dict-remove!', 'dict-set', 'dict-set!', 'dict-set*', 'dict-set*!',
+ 'dict-update', 'dict-update!', 'dict-value-contract', 'dict-values',
+ 'dict?', 'directory-exists?', 'directory-list', 'disjoin', 'display',
+ 'display-lines', 'display-lines-to-file', 'display-to-file',
+ 'displayln', 'double-flonum?', 'drop', 'drop-common-prefix',
+ 'drop-right', 'dropf', 'dropf-right', 'dump-memory-stats',
+ 'dup-input-port', 'dup-output-port', 'dynamic->*', 'dynamic-get-field',
+ 'dynamic-object/c', 'dynamic-place', 'dynamic-place*',
+ 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-send',
+ 'dynamic-set-field!', 'dynamic-wind', 'eighth', 'empty',
+ 'empty-sequence', 'empty-stream', 'empty?',
+ 'environment-variables-copy', 'environment-variables-names',
+ 'environment-variables-ref', 'environment-variables-set!',
+ 'environment-variables?', 'eof', 'eof-evt', 'eof-object?',
+ 'ephemeron-value', 'ephemeron?', 'eprintf', 'eq-contract-val',
+ 'eq-contract?', 'eq-hash-code', 'eq?', 'equal-contract-val',
+ 'equal-contract?', 'equal-hash-code', 'equal-secondary-hash-code',
+ 'equal<%>', 'equal?', 'equal?/recur', 'eqv-hash-code', 'eqv?', 'error',
+ 'error-display-handler', 'error-escape-handler',
+ 'error-print-context-length', 'error-print-source-location',
+ 'error-print-width', 'error-value->string-handler', 'eval',
+ 'eval-jit-enabled', 'eval-syntax', 'even?', 'evt/c', 'evt?',
+ 'exact->inexact', 'exact-ceiling', 'exact-floor', 'exact-integer?',
+ 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact-round',
+ 'exact-truncate', 'exact?', 'executable-yield-handler', 'exit',
+ 'exit-handler', 'exn', 'exn-continuation-marks', 'exn-message',
+ 'exn:break', 'exn:break-continuation', 'exn:break:hang-up',
+ 'exn:break:hang-up?', 'exn:break:terminate', 'exn:break:terminate?',
+ 'exn:break?', 'exn:fail', 'exn:fail:contract',
+ 'exn:fail:contract:arity', 'exn:fail:contract:arity?',
+ 'exn:fail:contract:blame', 'exn:fail:contract:blame-object',
+ 'exn:fail:contract:blame?', 'exn:fail:contract:continuation',
+ 'exn:fail:contract:continuation?', 'exn:fail:contract:divide-by-zero',
+ 'exn:fail:contract:divide-by-zero?',
+ 'exn:fail:contract:non-fixnum-result',
+ 'exn:fail:contract:non-fixnum-result?', 'exn:fail:contract:variable',
+ 'exn:fail:contract:variable-id', 'exn:fail:contract:variable?',
+ 'exn:fail:contract?', 'exn:fail:filesystem',
+ 'exn:fail:filesystem:errno', 'exn:fail:filesystem:errno-errno',
+ 'exn:fail:filesystem:errno?', 'exn:fail:filesystem:exists',
+ 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:missing-module',
+ 'exn:fail:filesystem:missing-module-path',
+ 'exn:fail:filesystem:missing-module?', 'exn:fail:filesystem:version',
+ 'exn:fail:filesystem:version?', 'exn:fail:filesystem?',
+ 'exn:fail:network', 'exn:fail:network:errno',
+ 'exn:fail:network:errno-errno', 'exn:fail:network:errno?',
+ 'exn:fail:network?', 'exn:fail:object', 'exn:fail:object?',
+ 'exn:fail:out-of-memory', 'exn:fail:out-of-memory?', 'exn:fail:read',
+ 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?',
+ 'exn:fail:read:non-char', 'exn:fail:read:non-char?', 'exn:fail:read?',
+ 'exn:fail:syntax', 'exn:fail:syntax-exprs',
+ 'exn:fail:syntax:missing-module',
+ 'exn:fail:syntax:missing-module-path',
+ 'exn:fail:syntax:missing-module?', 'exn:fail:syntax:unbound',
+ 'exn:fail:syntax:unbound?', 'exn:fail:syntax?', 'exn:fail:unsupported',
+ 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?',
+ 'exn:fail?', 'exn:misc:match?', 'exn:missing-module-accessor',
+ 'exn:missing-module?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?',
+ 'exp', 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once',
+ 'expand-syntax-to-top-form', 'expand-to-top-form', 'expand-user-path',
+ 'explode-path', 'expt', 'externalizable<%>', 'failure-result/c',
+ 'false?', 'field-names', 'fifth', 'file->bytes', 'file->bytes-lines',
+ 'file->lines', 'file->list', 'file->string', 'file->value',
+ 'file-exists?', 'file-name-from-path', 'file-or-directory-identity',
+ 'file-or-directory-modify-seconds', 'file-or-directory-permissions',
+ 'file-position', 'file-position*', 'file-size',
+ 'file-stream-buffer-mode', 'file-stream-port?', 'file-truncate',
+ 'filename-extension', 'filesystem-change-evt',
+ 'filesystem-change-evt-cancel', 'filesystem-change-evt?',
+ 'filesystem-root-list', 'filter', 'filter-map', 'filter-not',
+ 'filter-read-input-port', 'find-executable-path', 'find-files',
+ 'find-library-collection-links', 'find-library-collection-paths',
+ 'find-relative-path', 'find-system-path', 'findf', 'first',
+ 'first-or/c', 'fixnum?', 'flat-contract', 'flat-contract-predicate',
+ 'flat-contract-property?', 'flat-contract?', 'flat-named-contract',
+ 'flatten', 'floating-point-bytes->real', 'flonum?', 'floor',
+ 'flush-output', 'fold-files', 'foldl', 'foldr', 'for-each', 'force',
+ 'format', 'fourth', 'fprintf', 'free-identifier=?',
+ 'free-label-identifier=?', 'free-template-identifier=?',
+ 'free-transformer-identifier=?', 'fsemaphore-count', 'fsemaphore-post',
+ 'fsemaphore-try-wait?', 'fsemaphore-wait', 'fsemaphore?', 'future',
+ 'future?', 'futures-enabled?', 'gcd', 'generate-member-key',
+ 'generate-temporaries', 'generic-set?', 'generic?', 'gensym',
+ 'get-output-bytes', 'get-output-string', 'get-preference',
+ 'get/build-late-neg-projection', 'get/build-val-first-projection',
+ 'getenv', 'global-port-print-handler', 'group-by', 'group-execute-bit',
+ 'group-read-bit', 'group-write-bit', 'guard-evt', 'handle-evt',
+ 'handle-evt?', 'has-blame?', 'has-contract?', 'hash', 'hash->list',
+ 'hash-clear', 'hash-clear!', 'hash-copy', 'hash-copy-clear',
+ 'hash-count', 'hash-empty?', 'hash-eq?', 'hash-equal?', 'hash-eqv?',
+ 'hash-for-each', 'hash-has-key?', 'hash-iterate-first',
+ 'hash-iterate-key', 'hash-iterate-key+value', 'hash-iterate-next',
+ 'hash-iterate-pair', 'hash-iterate-value', 'hash-keys', 'hash-map',
+ 'hash-placeholder?', 'hash-ref', 'hash-ref!', 'hash-remove',
+ 'hash-remove!', 'hash-set', 'hash-set!', 'hash-set*', 'hash-set*!',
+ 'hash-update', 'hash-update!', 'hash-values', 'hash-weak?', 'hash/c',
+ 'hash?', 'hasheq', 'hasheqv', 'identifier-binding',
+ 'identifier-binding-symbol', 'identifier-label-binding',
+ 'identifier-prune-lexical-context',
+ 'identifier-prune-to-source-module',
+ 'identifier-remove-from-definition-context',
+ 'identifier-template-binding', 'identifier-transformer-binding',
+ 'identifier?', 'identity', 'if/c', 'imag-part', 'immutable?',
+ 'impersonate-box', 'impersonate-channel',
+ 'impersonate-continuation-mark-key', 'impersonate-hash',
+ 'impersonate-hash-set', 'impersonate-procedure',
+ 'impersonate-procedure*', 'impersonate-prompt-tag',
+ 'impersonate-struct', 'impersonate-vector', 'impersonator-contract?',
+ 'impersonator-ephemeron', 'impersonator-of?',
+ 'impersonator-prop:application-mark', 'impersonator-prop:blame',
+ 'impersonator-prop:contracted',
+ 'impersonator-property-accessor-procedure?', 'impersonator-property?',
+ 'impersonator?', 'implementation?', 'implementation?/c', 'in-bytes',
+ 'in-bytes-lines', 'in-combinations', 'in-cycle', 'in-dict',
+ 'in-dict-keys', 'in-dict-pairs', 'in-dict-values', 'in-directory',
+ 'in-hash', 'in-hash-keys', 'in-hash-pairs', 'in-hash-values',
+ 'in-immutable-hash', 'in-immutable-hash-keys',
+ 'in-immutable-hash-pairs', 'in-immutable-hash-values',
+ 'in-immutable-set', 'in-indexed', 'in-input-port-bytes',
+ 'in-input-port-chars', 'in-lines', 'in-list', 'in-mlist',
+ 'in-mutable-hash', 'in-mutable-hash-keys', 'in-mutable-hash-pairs',
+ 'in-mutable-hash-values', 'in-mutable-set', 'in-naturals',
+ 'in-parallel', 'in-permutations', 'in-port', 'in-producer', 'in-range',
+ 'in-sequences', 'in-set', 'in-slice', 'in-stream', 'in-string',
+ 'in-syntax', 'in-value', 'in-values*-sequence', 'in-values-sequence',
+ 'in-vector', 'in-weak-hash', 'in-weak-hash-keys', 'in-weak-hash-pairs',
+ 'in-weak-hash-values', 'in-weak-set', 'inexact->exact',
+ 'inexact-real?', 'inexact?', 'infinite?', 'input-port-append',
+ 'input-port?', 'inspector?', 'instanceof/c', 'integer->char',
+ 'integer->integer-bytes', 'integer-bytes->integer', 'integer-in',
+ 'integer-length', 'integer-sqrt', 'integer-sqrt/remainder', 'integer?',
+ 'interface->method-names', 'interface-extension?', 'interface?',
+ 'internal-definition-context-binding-identifiers',
+ 'internal-definition-context-introduce',
+ 'internal-definition-context-seal', 'internal-definition-context?',
+ 'is-a?', 'is-a?/c', 'keyword->string', 'keyword-apply', 'keyword<?',
+ 'keyword?', 'keywords-match', 'kill-thread', 'last', 'last-pair',
+ 'lcm', 'length', 'liberal-define-context?', 'link-exists?', 'list',
+ 'list*', 'list*of', 'list->bytes', 'list->mutable-set',
+ 'list->mutable-seteq', 'list->mutable-seteqv', 'list->set',
+ 'list->seteq', 'list->seteqv', 'list->string', 'list->vector',
+ 'list->weak-set', 'list->weak-seteq', 'list->weak-seteqv',
+ 'list-contract?', 'list-prefix?', 'list-ref', 'list-set', 'list-tail',
+ 'list-update', 'list/c', 'list?', 'listen-port-number?', 'listof',
+ 'load', 'load-extension', 'load-on-demand-enabled', 'load-relative',
+ 'load-relative-extension', 'load/cd', 'load/use-compiled',
+ 'local-expand', 'local-expand/capture-lifts',
+ 'local-transformer-expand', 'local-transformer-expand/capture-lifts',
+ 'locale-string-encoding', 'log', 'log-all-levels', 'log-level-evt',
+ 'log-level?', 'log-max-level', 'log-message', 'log-receiver?',
+ 'logger-name', 'logger?', 'magnitude', 'make-arity-at-least',
+ 'make-base-empty-namespace', 'make-base-namespace', 'make-bytes',
+ 'make-channel', 'make-chaperone-contract',
+ 'make-continuation-mark-key', 'make-continuation-prompt-tag',
+ 'make-contract', 'make-custodian', 'make-custodian-box',
+ 'make-custom-hash', 'make-custom-hash-types', 'make-custom-set',
+ 'make-custom-set-types', 'make-date', 'make-date*',
+ 'make-derived-parameter', 'make-directory', 'make-directory*',
+ 'make-do-sequence', 'make-empty-namespace',
+ 'make-environment-variables', 'make-ephemeron', 'make-exn',
+ 'make-exn:break', 'make-exn:break:hang-up', 'make-exn:break:terminate',
+ 'make-exn:fail', 'make-exn:fail:contract',
+ 'make-exn:fail:contract:arity', 'make-exn:fail:contract:blame',
+ 'make-exn:fail:contract:continuation',
+ 'make-exn:fail:contract:divide-by-zero',
+ 'make-exn:fail:contract:non-fixnum-result',
+ 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem',
+ 'make-exn:fail:filesystem:errno', 'make-exn:fail:filesystem:exists',
+ 'make-exn:fail:filesystem:missing-module',
+ 'make-exn:fail:filesystem:version', 'make-exn:fail:network',
+ 'make-exn:fail:network:errno', 'make-exn:fail:object',
+ 'make-exn:fail:out-of-memory', 'make-exn:fail:read',
+ 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char',
+ 'make-exn:fail:syntax', 'make-exn:fail:syntax:missing-module',
+ 'make-exn:fail:syntax:unbound', 'make-exn:fail:unsupported',
+ 'make-exn:fail:user', 'make-file-or-directory-link',
+ 'make-flat-contract', 'make-fsemaphore', 'make-generic',
+ 'make-handle-get-preference-locked', 'make-hash',
+ 'make-hash-placeholder', 'make-hasheq', 'make-hasheq-placeholder',
+ 'make-hasheqv', 'make-hasheqv-placeholder',
+ 'make-immutable-custom-hash', 'make-immutable-hash',
+ 'make-immutable-hasheq', 'make-immutable-hasheqv',
+ 'make-impersonator-property', 'make-input-port',
+ 'make-input-port/read-to-peek', 'make-inspector',
+ 'make-keyword-procedure', 'make-known-char-range-list',
+ 'make-limited-input-port', 'make-list', 'make-lock-file-name',
+ 'make-log-receiver', 'make-logger', 'make-mixin-contract',
+ 'make-mutable-custom-set', 'make-none/c', 'make-object',
+ 'make-output-port', 'make-parameter', 'make-parent-directory*',
+ 'make-phantom-bytes', 'make-pipe', 'make-pipe-with-specials',
+ 'make-placeholder', 'make-plumber', 'make-polar', 'make-prefab-struct',
+ 'make-primitive-class', 'make-proj-contract',
+ 'make-pseudo-random-generator', 'make-reader-graph', 'make-readtable',
+ 'make-rectangular', 'make-rename-transformer',
+ 'make-resolved-module-path', 'make-security-guard', 'make-semaphore',
+ 'make-set!-transformer', 'make-shared-bytes', 'make-sibling-inspector',
+ 'make-special-comment', 'make-srcloc', 'make-string',
+ 'make-struct-field-accessor', 'make-struct-field-mutator',
+ 'make-struct-type', 'make-struct-type-property',
+ 'make-syntax-delta-introducer', 'make-syntax-introducer',
+ 'make-temporary-file', 'make-tentative-pretty-print-output-port',
+ 'make-thread-cell', 'make-thread-group', 'make-vector',
+ 'make-weak-box', 'make-weak-custom-hash', 'make-weak-custom-set',
+ 'make-weak-hash', 'make-weak-hasheq', 'make-weak-hasheqv',
+ 'make-will-executor', 'map', 'match-equality-test',
+ 'matches-arity-exactly?', 'max', 'mcar', 'mcdr', 'mcons', 'member',
+ 'member-name-key-hash-code', 'member-name-key=?', 'member-name-key?',
+ 'memf', 'memq', 'memv', 'merge-input', 'method-in-interface?', 'min',
+ 'mixin-contract', 'module->exports', 'module->imports',
+ 'module->language-info', 'module->namespace',
+ 'module-compiled-cross-phase-persistent?', 'module-compiled-exports',
+ 'module-compiled-imports', 'module-compiled-language-info',
+ 'module-compiled-name', 'module-compiled-submodules',
+ 'module-declared?', 'module-path-index-join',
+ 'module-path-index-resolve', 'module-path-index-split',
+ 'module-path-index-submodule', 'module-path-index?', 'module-path?',
+ 'module-predefined?', 'module-provide-protected?', 'modulo', 'mpair?',
+ 'mutable-set', 'mutable-seteq', 'mutable-seteqv', 'n->th',
+ 'nack-guard-evt', 'namespace-anchor->empty-namespace',
+ 'namespace-anchor->namespace', 'namespace-anchor?',
+ 'namespace-attach-module', 'namespace-attach-module-declaration',
+ 'namespace-base-phase', 'namespace-mapped-symbols',
+ 'namespace-module-identifier', 'namespace-module-registry',
+ 'namespace-require', 'namespace-require/constant',
+ 'namespace-require/copy', 'namespace-require/expansion-time',
+ 'namespace-set-variable-value!', 'namespace-symbol->identifier',
+ 'namespace-syntax-introduce', 'namespace-undefine-variable!',
+ 'namespace-unprotect-module', 'namespace-variable-value', 'namespace?',
+ 'nan?', 'natural-number/c', 'negate', 'negative?', 'never-evt',
+ 'new-∀/c', 'new-∃/c', 'newline', 'ninth', 'non-empty-listof',
+ 'non-empty-string?', 'none/c', 'normal-case-path', 'normalize-arity',
+ 'normalize-path', 'normalized-arity?', 'not', 'not/c', 'null', 'null?',
+ 'number->string', 'number?', 'numerator', 'object%', 'object->vector',
+ 'object-info', 'object-interface', 'object-method-arity-includes?',
+ 'object-name', 'object-or-false=?', 'object=?', 'object?', 'odd?',
+ 'one-of/c', 'open-input-bytes', 'open-input-file',
+ 'open-input-output-file', 'open-input-string', 'open-output-bytes',
+ 'open-output-file', 'open-output-nowhere', 'open-output-string',
+ 'or/c', 'order-of-magnitude', 'ormap', 'other-execute-bit',
+ 'other-read-bit', 'other-write-bit', 'output-port?', 'pair?',
+ 'parameter-procedure=?', 'parameter/c', 'parameter?',
+ 'parameterization?', 'parse-command-line', 'partition', 'path->bytes',
+ 'path->complete-path', 'path->directory-path', 'path->string',
+ 'path-add-suffix', 'path-convention-type', 'path-element->bytes',
+ 'path-element->string', 'path-element?', 'path-for-some-system?',
+ 'path-list-string->path-list', 'path-only', 'path-replace-suffix',
+ 'path-string?', 'path<?', 'path?', 'pathlist-closure', 'peek-byte',
+ 'peek-byte-or-special', 'peek-bytes', 'peek-bytes!', 'peek-bytes!-evt',
+ 'peek-bytes-avail!', 'peek-bytes-avail!*', 'peek-bytes-avail!-evt',
+ 'peek-bytes-avail!/enable-break', 'peek-bytes-evt', 'peek-char',
+ 'peek-char-or-special', 'peek-string', 'peek-string!',
+ 'peek-string!-evt', 'peek-string-evt', 'peeking-input-port',
+ 'permutations', 'phantom-bytes?', 'pi', 'pi.f', 'pipe-content-length',
+ 'place-break', 'place-channel', 'place-channel-get',
+ 'place-channel-put', 'place-channel-put/get', 'place-channel?',
+ 'place-dead-evt', 'place-enabled?', 'place-kill', 'place-location?',
+ 'place-message-allowed?', 'place-sleep', 'place-wait', 'place?',
+ 'placeholder-get', 'placeholder-set!', 'placeholder?',
+ 'plumber-add-flush!', 'plumber-flush-all',
+ 'plumber-flush-handle-remove!', 'plumber-flush-handle?', 'plumber?',
+ 'poll-guard-evt', 'port->bytes', 'port->bytes-lines', 'port->lines',
+ 'port->list', 'port->string', 'port-closed-evt', 'port-closed?',
+ 'port-commit-peeked', 'port-count-lines!', 'port-count-lines-enabled',
+ 'port-counts-lines?', 'port-display-handler', 'port-file-identity',
+ 'port-file-unlock', 'port-next-location', 'port-number?',
+ 'port-print-handler', 'port-progress-evt',
+ 'port-provides-progress-evts?', 'port-read-handler',
+ 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?',
+ 'port-writes-special?', 'port?', 'positive?', 'predicate/c',
+ 'prefab-key->struct-type', 'prefab-key?', 'prefab-struct-key',
+ 'preferences-lock-file-mode', 'pregexp', 'pregexp?', 'pretty-display',
+ 'pretty-format', 'pretty-print', 'pretty-print-.-symbol-without-bars',
+ 'pretty-print-abbreviate-read-macros', 'pretty-print-columns',
+ 'pretty-print-current-style-table', 'pretty-print-depth',
+ 'pretty-print-exact-as-decimal', 'pretty-print-extend-style-table',
+ 'pretty-print-handler', 'pretty-print-newline',
+ 'pretty-print-post-print-hook', 'pretty-print-pre-print-hook',
+ 'pretty-print-print-hook', 'pretty-print-print-line',
+ 'pretty-print-remap-stylable', 'pretty-print-show-inexactness',
+ 'pretty-print-size-hook', 'pretty-print-style-table?',
+ 'pretty-printing', 'pretty-write', 'primitive-closure?',
+ 'primitive-result-arity', 'primitive?', 'print', 'print-as-expression',
+ 'print-boolean-long-form', 'print-box', 'print-graph',
+ 'print-hash-table', 'print-mpair-curly-braces',
+ 'print-pair-curly-braces', 'print-reader-abbreviations',
+ 'print-struct', 'print-syntax-width', 'print-unreadable',
+ 'print-vector-length', 'printable/c', 'printable<%>', 'printf',
+ 'println', 'procedure->method', 'procedure-arity',
+ 'procedure-arity-includes/c', 'procedure-arity-includes?',
+ 'procedure-arity?', 'procedure-closure-contents-eq?',
+ 'procedure-extract-target', 'procedure-keywords',
+ 'procedure-reduce-arity', 'procedure-reduce-keyword-arity',
+ 'procedure-rename', 'procedure-result-arity', 'procedure-specialize',
+ 'procedure-struct-type?', 'procedure?', 'process', 'process*',
+ 'process*/ports', 'process/ports', 'processor-count', 'progress-evt?',
+ 'promise-forced?', 'promise-running?', 'promise/c', 'promise/name?',
+ 'promise?', 'prop:arity-string', 'prop:arrow-contract',
+ 'prop:arrow-contract-get-info', 'prop:arrow-contract?', 'prop:blame',
+ 'prop:chaperone-contract', 'prop:checked-procedure', 'prop:contract',
+ 'prop:contracted', 'prop:custom-print-quotable', 'prop:custom-write',
+ 'prop:dict', 'prop:dict/contract', 'prop:equal+hash', 'prop:evt',
+ 'prop:exn:missing-module', 'prop:exn:srclocs',
+ 'prop:expansion-contexts', 'prop:flat-contract',
+ 'prop:impersonator-of', 'prop:input-port',
+ 'prop:liberal-define-context', 'prop:object-name',
+ 'prop:opt-chaperone-contract', 'prop:opt-chaperone-contract-get-test',
+ 'prop:opt-chaperone-contract?', 'prop:orc-contract',
+ 'prop:orc-contract-get-subcontracts', 'prop:orc-contract?',
+ 'prop:output-port', 'prop:place-location', 'prop:procedure',
+ 'prop:recursive-contract', 'prop:recursive-contract-unroll',
+ 'prop:recursive-contract?', 'prop:rename-transformer', 'prop:sequence',
+ 'prop:set!-transformer', 'prop:stream', 'proper-subset?',
+ 'pseudo-random-generator->vector', 'pseudo-random-generator-vector?',
+ 'pseudo-random-generator?', 'put-preferences', 'putenv', 'quotient',
+ 'quotient/remainder', 'radians->degrees', 'raise',
+ 'raise-argument-error', 'raise-arguments-error', 'raise-arity-error',
+ 'raise-blame-error', 'raise-contract-error', 'raise-mismatch-error',
+ 'raise-not-cons-blame-error', 'raise-range-error',
+ 'raise-result-error', 'raise-syntax-error', 'raise-type-error',
+ 'raise-user-error', 'random', 'random-seed', 'range', 'rational?',
+ 'rationalize', 'read', 'read-accept-bar-quote', 'read-accept-box',
+ 'read-accept-compiled', 'read-accept-dot', 'read-accept-graph',
+ 'read-accept-infix-dot', 'read-accept-lang', 'read-accept-quasiquote',
+ 'read-accept-reader', 'read-byte', 'read-byte-or-special',
+ 'read-bytes', 'read-bytes!', 'read-bytes!-evt', 'read-bytes-avail!',
+ 'read-bytes-avail!*', 'read-bytes-avail!-evt',
+ 'read-bytes-avail!/enable-break', 'read-bytes-evt', 'read-bytes-line',
+ 'read-bytes-line-evt', 'read-case-sensitive', 'read-cdot', 'read-char',
+ 'read-char-or-special', 'read-curly-brace-as-paren',
+ 'read-curly-brace-with-tag', 'read-decimal-as-inexact',
+ 'read-eval-print-loop', 'read-language', 'read-line', 'read-line-evt',
+ 'read-on-demand-source', 'read-square-bracket-as-paren',
+ 'read-square-bracket-with-tag', 'read-string', 'read-string!',
+ 'read-string!-evt', 'read-string-evt', 'read-syntax',
+ 'read-syntax/recursive', 'read/recursive', 'readtable-mapping',
+ 'readtable?', 'real->decimal-string', 'real->double-flonum',
+ 'real->floating-point-bytes', 'real->single-flonum', 'real-in',
+ 'real-part', 'real?', 'reencode-input-port', 'reencode-output-port',
+ 'regexp', 'regexp-match', 'regexp-match*', 'regexp-match-evt',
+ 'regexp-match-exact?', 'regexp-match-peek',
+ 'regexp-match-peek-immediate', 'regexp-match-peek-positions',
+ 'regexp-match-peek-positions*',
+ 'regexp-match-peek-positions-immediate',
+ 'regexp-match-peek-positions-immediate/end',
+ 'regexp-match-peek-positions/end', 'regexp-match-positions',
+ 'regexp-match-positions*', 'regexp-match-positions/end',
+ 'regexp-match/end', 'regexp-match?', 'regexp-max-lookbehind',
+ 'regexp-quote', 'regexp-replace', 'regexp-replace*',
+ 'regexp-replace-quote', 'regexp-replaces', 'regexp-split',
+ 'regexp-try-match', 'regexp?', 'relative-path?', 'relocate-input-port',
+ 'relocate-output-port', 'remainder', 'remf', 'remf*', 'remove',
+ 'remove*', 'remove-duplicates', 'remq', 'remq*', 'remv', 'remv*',
+ 'rename-contract', 'rename-file-or-directory',
+ 'rename-transformer-target', 'rename-transformer?', 'replace-evt',
+ 'reroot-path', 'resolve-path', 'resolved-module-path-name',
+ 'resolved-module-path?', 'rest', 'reverse', 'round', 'second',
+ 'seconds->date', 'security-guard?', 'semaphore-peek-evt',
+ 'semaphore-peek-evt?', 'semaphore-post', 'semaphore-try-wait?',
+ 'semaphore-wait', 'semaphore-wait/enable-break', 'semaphore?',
+ 'sequence->list', 'sequence->stream', 'sequence-add-between',
+ 'sequence-andmap', 'sequence-append', 'sequence-count',
+ 'sequence-filter', 'sequence-fold', 'sequence-for-each',
+ 'sequence-generate', 'sequence-generate*', 'sequence-length',
+ 'sequence-map', 'sequence-ormap', 'sequence-ref', 'sequence-tail',
+ 'sequence/c', 'sequence?', 'set', 'set!-transformer-procedure',
+ 'set!-transformer?', 'set->list', 'set->stream', 'set-add', 'set-add!',
+ 'set-box!', 'set-clear', 'set-clear!', 'set-copy', 'set-copy-clear',
+ 'set-count', 'set-empty?', 'set-eq?', 'set-equal?', 'set-eqv?',
+ 'set-first', 'set-for-each', 'set-implements/c', 'set-implements?',
+ 'set-intersect', 'set-intersect!', 'set-map', 'set-mcar!', 'set-mcdr!',
+ 'set-member?', 'set-mutable?', 'set-phantom-bytes!',
+ 'set-port-next-location!', 'set-remove', 'set-remove!', 'set-rest',
+ 'set-some-basic-contracts!', 'set-subtract', 'set-subtract!',
+ 'set-symmetric-difference', 'set-symmetric-difference!', 'set-union',
+ 'set-union!', 'set-weak?', 'set/c', 'set=?', 'set?', 'seteq', 'seteqv',
+ 'seventh', 'sgn', 'shared-bytes', 'shell-execute', 'shrink-path-wrt',
+ 'shuffle', 'simple-form-path', 'simplify-path', 'sin',
+ 'single-flonum?', 'sinh', 'sixth', 'skip-projection-wrapper?', 'sleep',
+ 'some-system-path->string', 'sort', 'special-comment-value',
+ 'special-comment?', 'special-filter-input-port', 'split-at',
+ 'split-at-right', 'split-common-prefix', 'split-path', 'splitf-at',
+ 'splitf-at-right', 'sqr', 'sqrt', 'srcloc', 'srcloc->string',
+ 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source',
+ 'srcloc-span', 'srcloc?', 'stop-after', 'stop-before', 'stream->list',
+ 'stream-add-between', 'stream-andmap', 'stream-append', 'stream-count',
+ 'stream-empty?', 'stream-filter', 'stream-first', 'stream-fold',
+ 'stream-for-each', 'stream-length', 'stream-map', 'stream-ormap',
+ 'stream-ref', 'stream-rest', 'stream-tail', 'stream/c', 'stream?',
+ 'string', 'string->bytes/latin-1', 'string->bytes/locale',
+ 'string->bytes/utf-8', 'string->immutable-string', 'string->keyword',
+ 'string->list', 'string->number', 'string->path',
+ 'string->path-element', 'string->some-system-path', 'string->symbol',
+ 'string->uninterned-symbol', 'string->unreadable-symbol',
+ 'string-append', 'string-append*', 'string-ci<=?', 'string-ci<?',
+ 'string-ci=?', 'string-ci>=?', 'string-ci>?', 'string-contains?',
+ 'string-copy', 'string-copy!', 'string-downcase',
+ 'string-environment-variable-name?', 'string-fill!', 'string-foldcase',
+ 'string-join', 'string-len/c', 'string-length', 'string-locale-ci<?',
+ 'string-locale-ci=?', 'string-locale-ci>?', 'string-locale-downcase',
+ 'string-locale-upcase', 'string-locale<?', 'string-locale=?',
+ 'string-locale>?', 'string-no-nuls?', 'string-normalize-nfc',
+ 'string-normalize-nfd', 'string-normalize-nfkc',
+ 'string-normalize-nfkd', 'string-normalize-spaces', 'string-port?',
+ 'string-prefix?', 'string-ref', 'string-replace', 'string-set!',
+ 'string-split', 'string-suffix?', 'string-titlecase', 'string-trim',
+ 'string-upcase', 'string-utf-8-length', 'string<=?', 'string<?',
+ 'string=?', 'string>=?', 'string>?', 'string?', 'struct->vector',
+ 'struct-accessor-procedure?', 'struct-constructor-procedure?',
+ 'struct-info', 'struct-mutator-procedure?',
+ 'struct-predicate-procedure?', 'struct-type-info',
+ 'struct-type-make-constructor', 'struct-type-make-predicate',
+ 'struct-type-property-accessor-procedure?', 'struct-type-property/c',
+ 'struct-type-property?', 'struct-type?', 'struct:arity-at-least',
+ 'struct:arrow-contract-info', 'struct:date', 'struct:date*',
+ 'struct:exn', 'struct:exn:break', 'struct:exn:break:hang-up',
+ 'struct:exn:break:terminate', 'struct:exn:fail',
+ 'struct:exn:fail:contract', 'struct:exn:fail:contract:arity',
+ 'struct:exn:fail:contract:blame',
+ 'struct:exn:fail:contract:continuation',
+ 'struct:exn:fail:contract:divide-by-zero',
+ 'struct:exn:fail:contract:non-fixnum-result',
+ 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem',
+ 'struct:exn:fail:filesystem:errno',
+ 'struct:exn:fail:filesystem:exists',
+ 'struct:exn:fail:filesystem:missing-module',
+ 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network',
+ 'struct:exn:fail:network:errno', 'struct:exn:fail:object',
+ 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read',
+ 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char',
+ 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:missing-module',
+ 'struct:exn:fail:syntax:unbound', 'struct:exn:fail:unsupported',
+ 'struct:exn:fail:user', 'struct:srcloc',
+ 'struct:wrapped-extra-arg-arrow', 'struct?', 'sub1', 'subbytes',
+ 'subclass?', 'subclass?/c', 'subprocess', 'subprocess-group-enabled',
+ 'subprocess-kill', 'subprocess-pid', 'subprocess-status',
+ 'subprocess-wait', 'subprocess?', 'subset?', 'substring', 'suggest/c',
+ 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', 'symbol<?',
+ 'symbol=?', 'symbol?', 'symbols', 'sync', 'sync/enable-break',
+ 'sync/timeout', 'sync/timeout/enable-break', 'syntax->datum',
+ 'syntax->list', 'syntax-arm', 'syntax-column', 'syntax-debug-info',
+ 'syntax-disarm', 'syntax-e', 'syntax-line',
+ 'syntax-local-bind-syntaxes', 'syntax-local-certifier',
+ 'syntax-local-context', 'syntax-local-expand-expression',
+ 'syntax-local-get-shadower', 'syntax-local-identifier-as-binding',
+ 'syntax-local-introduce', 'syntax-local-lift-context',
+ 'syntax-local-lift-expression', 'syntax-local-lift-module',
+ 'syntax-local-lift-module-end-declaration',
+ 'syntax-local-lift-provide', 'syntax-local-lift-require',
+ 'syntax-local-lift-values-expression',
+ 'syntax-local-make-definition-context',
+ 'syntax-local-make-delta-introducer',
+ 'syntax-local-module-defined-identifiers',
+ 'syntax-local-module-exports',
+ 'syntax-local-module-required-identifiers', 'syntax-local-name',
+ 'syntax-local-phase-level', 'syntax-local-submodules',
+ 'syntax-local-transforming-module-provides?', 'syntax-local-value',
+ 'syntax-local-value/immediate', 'syntax-original?', 'syntax-position',
+ 'syntax-property', 'syntax-property-preserved?',
+ 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm',
+ 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source',
+ 'syntax-source-module', 'syntax-span', 'syntax-taint',
+ 'syntax-tainted?', 'syntax-track-origin',
+ 'syntax-transforming-module-expression?',
+ 'syntax-transforming-with-lifts?', 'syntax-transforming?', 'syntax/c',
+ 'syntax?', 'system', 'system*', 'system*/exit-code',
+ 'system-big-endian?', 'system-idle-evt', 'system-language+country',
+ 'system-library-subpath', 'system-path-convention-type', 'system-type',
+ 'system/exit-code', 'tail-marks-match?', 'take', 'take-common-prefix',
+ 'take-right', 'takef', 'takef-right', 'tan', 'tanh',
+ 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt',
+ 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses',
+ 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', 'tcp-listen',
+ 'tcp-listener?', 'tcp-port?', 'tentative-pretty-print-port-cancel',
+ 'tentative-pretty-print-port-transfer', 'tenth', 'terminal-port?',
+ 'the-unsupplied-arg', 'third', 'thread', 'thread-cell-ref',
+ 'thread-cell-set!', 'thread-cell-values?', 'thread-cell?',
+ 'thread-dead-evt', 'thread-dead?', 'thread-group?', 'thread-receive',
+ 'thread-receive-evt', 'thread-resume', 'thread-resume-evt',
+ 'thread-rewind-receive', 'thread-running?', 'thread-send',
+ 'thread-suspend', 'thread-suspend-evt', 'thread-try-receive',
+ 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply',
+ 'touch', 'transplant-input-port', 'transplant-output-port', 'true',
+ 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', 'udp-close',
+ 'udp-connect!', 'udp-connected?', 'udp-multicast-interface',
+ 'udp-multicast-join-group!', 'udp-multicast-leave-group!',
+ 'udp-multicast-loopback?', 'udp-multicast-set-interface!',
+ 'udp-multicast-set-loopback!', 'udp-multicast-set-ttl!',
+ 'udp-multicast-ttl', 'udp-open-socket', 'udp-receive!',
+ 'udp-receive!*', 'udp-receive!-evt', 'udp-receive!/enable-break',
+ 'udp-receive-ready-evt', 'udp-send', 'udp-send*', 'udp-send-evt',
+ 'udp-send-ready-evt', 'udp-send-to', 'udp-send-to*', 'udp-send-to-evt',
+ 'udp-send-to/enable-break', 'udp-send/enable-break', 'udp?', 'unbox',
+ 'uncaught-exception-handler', 'unit?', 'unspecified-dom',
+ 'unsupplied-arg?', 'use-collection-link-paths',
+ 'use-compiled-file-paths', 'use-user-specific-search-paths',
+ 'user-execute-bit', 'user-read-bit', 'user-write-bit', 'value-blame',
+ 'value-contract', 'values', 'variable-reference->empty-namespace',
+ 'variable-reference->module-base-phase',
+ 'variable-reference->module-declaration-inspector',
+ 'variable-reference->module-path-index',
+ 'variable-reference->module-source', 'variable-reference->namespace',
+ 'variable-reference->phase',
+ 'variable-reference->resolved-module-path',
+ 'variable-reference-constant?', 'variable-reference?', 'vector',
+ 'vector->immutable-vector', 'vector->list',
+ 'vector->pseudo-random-generator', 'vector->pseudo-random-generator!',
+ 'vector->values', 'vector-append', 'vector-argmax', 'vector-argmin',
+ 'vector-copy', 'vector-copy!', 'vector-count', 'vector-drop',
+ 'vector-drop-right', 'vector-fill!', 'vector-filter',
+ 'vector-filter-not', 'vector-immutable', 'vector-immutable/c',
+ 'vector-immutableof', 'vector-length', 'vector-map', 'vector-map!',
+ 'vector-member', 'vector-memq', 'vector-memv', 'vector-ref',
+ 'vector-set!', 'vector-set*!', 'vector-set-performance-stats!',
+ 'vector-split-at', 'vector-split-at-right', 'vector-take',
+ 'vector-take-right', 'vector/c', 'vector?', 'vectorof', 'version',
+ 'void', 'void?', 'weak-box-value', 'weak-box?', 'weak-set',
+ 'weak-seteq', 'weak-seteqv', 'will-execute', 'will-executor?',
+ 'will-register', 'will-try-execute', 'with-input-from-bytes',
+ 'with-input-from-file', 'with-input-from-string',
+ 'with-output-to-bytes', 'with-output-to-file', 'with-output-to-string',
+ 'would-be-future', 'wrap-evt', 'wrapped-extra-arg-arrow',
+ 'wrapped-extra-arg-arrow-extra-neg-party-argument',
+ 'wrapped-extra-arg-arrow-real-func', 'wrapped-extra-arg-arrow?',
+ 'writable<%>', 'write', 'write-byte', 'write-bytes',
+ 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt',
+ 'write-bytes-avail/enable-break', 'write-char', 'write-special',
+ 'write-special-avail*', 'write-special-evt', 'write-string',
+ 'write-to-file', 'writeln', 'xor', 'zero?', '~.a', '~.s', '~.v', '~a',
+ '~e', '~r', '~s', '~v'
+ )
+
    # --- Regex building blocks for Racket's reader syntax ---------------
    # These fragments are composed (via %-interpolation) into the rules of
    # the ``tokens`` table below.  They mirror the Racket reader grammar;
    # changing any of them changes which lexemes the number/symbol rules
    # match, so they must be kept in sync with the rules that embed them.

    # Any opening / closing bracket character; Racket treats (), [] and {}
    # interchangeably.
    _opening_parenthesis = r'[([{]'
    _closing_parenthesis = r'[)\]}]'
    # Characters that terminate a token (used inside character classes and
    # lookaheads, hence the unescaped-looking contents).
    _delimiters = r'()[\]{}",\'`;\s'
    # A symbol: |...|-quoted runs, backslash escapes, or any run of
    # non-delimiter characters.
    _symbol = r'(?:\|[^|]*\||\\[\w\W]|[^|\\%s]+)+' % _delimiters
    # Optional exactness/base prefixes for decimal numbers (#e/#d may
    # appear in either order, hence the duplicated optional #e).
    _exact_decimal_prefix = r'(?:#e)?(?:#d)?(?:#e)?'
    # Exponent markers d/e/f/l/s as used by the Racket reader.
    _exponent = r'(?:[defls][-+]?\d+)'
    # Simple inexact numeral: integer, rational, or decimal fraction.
    _inexact_simple_no_hashes = r'(?:\d+(?:/\d+|\.\d*)?|\.\d+)'
    # Same, but also allowing the legacy '#' digit placeholders.
    _inexact_simple = (r'(?:%s|(?:\d+#+(?:\.#*|/\d+#*)?|\.\d+#+|'
                       r'\d+(?:\.\d*#+|/\d+#+)))' % _inexact_simple_no_hashes)
    # Simple numeral with an optional exponent suffix.
    _inexact_normal_no_hashes = r'(?:%s%s?)' % (_inexact_simple_no_hashes,
                                                _exponent)
    _inexact_normal = r'(?:%s%s?)' % (_inexact_simple, _exponent)
    # Special inexact values: inf.0, nan.0, inf.f, nan.f (sign added by
    # the composing patterns below).
    _inexact_special = r'(?:(?:inf|nan)\.[0f])'
    # A real number: optionally signed normal numeral, or a *mandatorily*
    # signed special value (+inf.0 etc. require the sign in Racket).
    _inexact_real = r'(?:[-+]?%s|[-+]%s)' % (_inexact_normal,
                                             _inexact_special)
    # Unsigned variant, used for the magnitude part of complex numbers.
    _inexact_unsigned = r'(?:%s|%s)' % (_inexact_normal, _inexact_special)
+
    # State machine for the Racket reader.  Rule ORDER is significant:
    # within a state the first matching rule wins, so more specific
    # patterns (e.g. numbers) must precede the catch-all symbol rules.
    # Most datum rules '#pop' back to the enclosing list/root state once a
    # complete datum has been consumed.
    tokens = {
        'root': [
            # A closing bracket with nothing open is a syntax error.
            (_closing_parenthesis, Error),
            # Zero-width transition: anything else starts a datum.
            (r'(?!\Z)', Text, 'unquoted-datum')
        ],
        # Shared rules for a single datum, included by the unquoted/
        # quasiquoted/quoted variants below.
        'datum': [
            # Datum comment '#;' and '#! ' / '#!/' line comments (with
            # backslash line continuation).
            (r'(?s)#;|#![ /]([^\\\n]|\\.)*', Comment),
            # Line comment up to any Unicode line terminator.
            (r';[^\n\r\x85\u2028\u2029]*', Comment.Single),
            (r'#\|', Comment.Multiline, 'block-comment'),

            # Whitespaces
            (r'(?u)\s+', Whitespace),

            # Numbers: Keep in mind Racket reader hash prefixes, which
            # can denote the base or the type. These don't map neatly
            # onto Pygments token types; some judgment calls here.

            # #d or no prefix
            (r'(?i)%s[-+]?\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters),
             Number.Integer, '#pop'),
            (r'(?i)%s[-+]?(\d+(\.\d*)?|\.\d+)([deflst][-+]?\d+)?(?=[%s])' %
             (_exact_decimal_prefix, _delimiters), Number.Float, '#pop'),
            # Exact complex numbers (a+bi forms).
            (r'(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' %
             (_exact_decimal_prefix, _inexact_normal_no_hashes,
              _inexact_normal_no_hashes, _inexact_normal_no_hashes,
              _delimiters), Number, '#pop'),

            # Inexact without explicit #i (including polar r@theta form)
            (r'(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' %
             (_inexact_real, _inexact_unsigned, _inexact_unsigned,
              _inexact_real, _inexact_real, _delimiters), Number.Float,
             '#pop'),

            # The remaining extflonums ('t' exponent marker)
            (r'(?i)(([-+]?%st[-+]?\d+)|[-+](inf|nan)\.t)(?=[%s])' %
             (_inexact_simple, _delimiters), Number.Float, '#pop'),

            # #b -- binary literals are lexed loosely as a symbol run
            (r'(?iu)(#[ei])?#b%s' % _symbol, Number.Bin, '#pop'),

            # #o
            (r'(?iu)(#[ei])?#o%s' % _symbol, Number.Oct, '#pop'),

            # #x
            (r'(?iu)(#[ei])?#x%s' % _symbol, Number.Hex, '#pop'),

            # #i is always inexact, i.e. float
            (r'(?iu)(#d)?#i%s' % _symbol, Number.Float, '#pop'),

            # Strings and characters
            (r'#?"', String.Double, ('#pop', 'string')),
            # Here strings: #<<TAG ... TAG
            (r'#<<(.+)\n(^(?!\1$).*$\n)*^\1$', String.Heredoc, '#pop'),
            # Character literals: hex escapes first so they are not
            # swallowed by the generic rule below.
            (r'#\\(u[\da-fA-F]{1,4}|U[\da-fA-F]{1,8})', String.Char, '#pop'),
            (r'(?is)#\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'),
            # Regexp literals: #px"...", #rx"...", byte variants with #
            (r'(?s)#[pr]x#?"(\\?.)*?"', String.Regex, '#pop'),

            # Constants
            (r'#(true|false|[tTfF])', Name.Constant, '#pop'),

            # Keyword argument names (e.g. #:keyword)
            (r'#:%s' % _symbol, Keyword.Declaration, '#pop'),

            # Reader extensions
            (r'(#lang |#!)(\S+)',
             bygroups(Keyword.Namespace, Name.Namespace)),
            (r'#reader', Keyword.Namespace, 'quoted-datum'),

            # Other syntax: dotted pairs, #ci/#cs, syntax quoting
            (r"(?i)\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters, Operator),
            # Quote/prefab/hash/vector prefixes switch to quoted context.
            (r"'|#[s&]|#hash(eqv?)?|#\d*(?=%s)" % _opening_parenthesis,
             Operator, ('#pop', 'quoted-datum'))
        ],
        # Fallback rules appended after the context-specific ones: bare
        # symbols, stray quoting operators, and a default pop.
        'datum*': [
            (r'`|,@?', Operator),
            (_symbol, String.Symbol, '#pop'),
            (r'[|\\]', Error),
            default('#pop')
        ],
        # A list body: only the closing bracket is handled here; each
        # element re-enters the appropriate datum state.
        'list': [
            (_closing_parenthesis, Punctuation, '#pop')
        ],
        # Datum in evaluated (unquoted) position: keywords and builtins
        # are highlighted; quote/quasiquote switch context.
        'unquoted-datum': [
            include('datum'),
            (r'quote(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'quoted-datum')),
            (r'`', Operator, ('#pop', 'quasiquoted-datum')),
            (r'quasiquote(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'quasiquoted-datum')),
            (_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')),
            (words(_keywords, suffix='(?=[%s])' % _delimiters),
             Keyword, '#pop'),
            (words(_builtins, suffix='(?=[%s])' % _delimiters),
             Name.Builtin, '#pop'),
            (_symbol, Name, '#pop'),
            include('datum*')
        ],
        'unquoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'unquoted-datum')
        ],
        # Datum under quasiquote: unquote/unquote-splicing drop back to
        # the unquoted context.
        'quasiquoted-datum': [
            include('datum'),
            (r',@?', Operator, ('#pop', 'unquoted-datum')),
            (r'unquote(-splicing)?(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'unquoted-datum')),
            (_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')),
            include('datum*')
        ],
        'quasiquoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'quasiquoted-datum')
        ],
        # Fully quoted datum: no keyword/builtin highlighting at all.
        'quoted-datum': [
            include('datum'),
            (_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')),
            include('datum*')
        ],
        'quoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'quoted-datum')
        ],
        # #| ... |# comments nest, hence the explicit '#push'.
        'block-comment': [
            (r'#\|', Comment.Multiline, '#push'),
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^#|]+|.', Comment.Multiline)
        ],
        # String body with octal/hex/unicode escapes.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)\\([0-7]{1,3}|x[\da-fA-F]{1,2}|u[\da-fA-F]{1,4}|'
             r'U[\da-fA-F]{1,8}|.)', String.Escape),
            (r'[^\\"]+', String.Double)
        ]
    }
+
+
class NewLispLexer(RegexLexer):
    """
    For newLISP source code (version 10.3.0).

    A flat single-state lexer: built-in function names are highlighted as
    keywords, everything else falls through to symbols.  Rule order in
    ``root`` is significant (shebang before generic '#' comments, builtins
    before the generic symbol rules).

    .. versionadded:: 1.5
    """

    name = 'NewLisp'
    url = 'http://www.newlisp.org/'
    aliases = ['newlisp']
    filenames = ['*.lsp', '*.nl', '*.kif']
    mimetypes = ['text/x-newlisp', 'application/x-newlisp']

    # Case-insensitive because newLISP operator names are matched loosely;
    # MULTILINE so the '$'-anchored comment rules work per line.
    flags = re.IGNORECASE | re.MULTILINE

    # list of built-in functions for newLISP version 10.3
    builtins = (
        '^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
        '<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
        '$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
        '$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
        'acos', 'acosh', 'add', 'address', 'amb', 'and', 'append-file',
        'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
        'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
        'base64-enc', 'bayes-query', 'bayes-train', 'begin',
        'beta', 'betai', 'bind', 'binomial', 'bits', 'callback',
        'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
        'close', 'command-event', 'cond', 'cons', 'constant',
        'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
        'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
        'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
        'def-new', 'default', 'define-macro', 'define',
        'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
        'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
        'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
        'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
        'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
        'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
        'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
        'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
        'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
        'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
        'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
        'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
        'last', 'legal?', 'length', 'let', 'letex', 'letn',
        'list?', 'list', 'load', 'local', 'log', 'lookup',
        'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
        'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
        'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
        'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
        'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
        'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
        'net-send-to', 'net-send-udp', 'net-send', 'net-service',
        'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
        'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
        'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
        'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
        'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
        'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
        'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
        'read-key', 'read-line', 'read-utf8', 'reader-event',
        'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
        'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
        'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
        'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
        'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
        'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
        'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
        'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
        'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
        'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
        'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
        'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
        'write', 'write-char', 'write-file', 'write-line',
        'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
    )

    # valid names: symbol characters, or bracketed forms like [text]
    valid_name = r'([\w!$%&*+.,/<=>?@^~|-])+|(\[.*?\])+'

    tokens = {
        'root': [
            # shebang (must precede the generic '#' comment rule)
            (r'#!(.*?)$', Comment.Preproc),
            # comments starting with semicolon
            (r';.*$', Comment.Single),
            # comments starting with #
            (r'#.*$', Comment.Single),

            # whitespace
            (r'\s+', Whitespace),

            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),

            # braces start a {...} string; nesting handled in 'bracestring'
            (r'\{', String, "bracestring"),

            # [text] ... [/text] delimited strings
            # NOTE(review): the trailing '*' applies only to '\]', so a
            # bare '[text' also matches -- looks intentional/lenient, but
            # confirm against upstream before changing.
            (r'\[text\]*', String, "tagstring"),

            # 'special' operators...
            (r"('|:)", Operator),

            # highlight the builtins
            (words(builtins, suffix=r'\b'),
             Keyword),

            # the remaining functions (any name in call position)
            (r'(?<=\()' + valid_name, Name.Variable),

            # the remaining variables
            (valid_name, String.Symbol),

            # parentheses
            (r'(\(|\))', Punctuation),
        ],

        # braced strings: {...} may nest, hence push/pop
        'bracestring': [
            (r'\{', String, "#push"),
            (r'\}', String, "#pop"),
            ('[^{}]+', String),
        ],

        # tagged [text]...[/text] delimited strings: consume everything
        # (non-greedily, across newlines) up to the closing tag
        'tagstring': [
            (r'(?s)(.*?)(\[/text\])', String, '#pop'),
        ],
    }
+
+
+class EmacsLispLexer(RegexLexer):
+ """
+ An ELisp lexer, parsing a stream and outputting the tokens
+ needed to highlight elisp code.
+
+ .. versionadded:: 2.1
+ """
+ name = 'EmacsLisp'
+ aliases = ['emacs-lisp', 'elisp', 'emacs']
+ filenames = ['*.el']
+ mimetypes = ['text/x-elisp', 'application/x-elisp']
+
+ flags = re.MULTILINE
+
+    # couple of useful regexes
+
+    # characters that are not macro-characters and can be used to begin a symbol
+    # NOTE(review): within the [] class, '+-/' is a character range that also
+    # covers ',' '.' and '-' — confirm that is intended
+    nonmacro = r'\\.|[\w!$%&*+-/<=>?@^{}~|]'
+    # any character that may continue a symbol ('#', '.' and ':' added)
+    constituent = nonmacro + '|[#.:]'
+    terminated = r'(?=[ "()\]\'\n,;`])' # whitespace or terminating macro characters
+
+    # symbol token, reverse-engineered from hyperspec
+    # Take a deep breath...
+    # one starting character followed by any number of constituent characters
+    symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent)
+
+ macros = {
+ 'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2',
+ 'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare',
+ 'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct',
+ 'cl-defsubst', 'cl-deftype', 'cl-defun', 'cl-destructuring-bind',
+ 'cl-do', 'cl-do*', 'cl-do-all-symbols', 'cl-do-symbols', 'cl-dolist',
+ 'cl-dotimes', 'cl-ecase', 'cl-etypecase', 'eval-when', 'cl-eval-when', 'cl-flet',
+ 'cl-flet*', 'cl-function', 'cl-incf', 'cl-labels', 'cl-letf',
+ 'cl-letf*', 'cl-load-time-value', 'cl-locally', 'cl-loop',
+ 'cl-macrolet', 'cl-multiple-value-bind', 'cl-multiple-value-setq',
+ 'cl-progv', 'cl-psetf', 'cl-psetq', 'cl-pushnew', 'cl-remf',
+ 'cl-return', 'cl-return-from', 'cl-rotatef', 'cl-shiftf',
+ 'cl-symbol-macrolet', 'cl-tagbody', 'cl-the', 'cl-typecase',
+ 'combine-after-change-calls', 'condition-case-unless-debug', 'decf',
+ 'declaim', 'declare', 'declare-function', 'def-edebug-spec',
+ 'defadvice', 'defclass', 'defcustom', 'defface', 'defgeneric',
+ 'defgroup', 'define-advice', 'define-alternatives',
+ 'define-compiler-macro', 'define-derived-mode', 'define-generic-mode',
+ 'define-global-minor-mode', 'define-globalized-minor-mode',
+ 'define-minor-mode', 'define-modify-macro',
+ 'define-obsolete-face-alias', 'define-obsolete-function-alias',
+ 'define-obsolete-variable-alias', 'define-setf-expander',
+ 'define-skeleton', 'defmacro', 'defmethod', 'defsetf', 'defstruct',
+ 'defsubst', 'deftheme', 'deftype', 'defun', 'defvar-local',
+ 'delay-mode-hooks', 'destructuring-bind', 'do', 'do*',
+ 'do-all-symbols', 'do-symbols', 'dolist', 'dont-compile', 'dotimes',
+ 'dotimes-with-progress-reporter', 'ecase', 'ert-deftest', 'etypecase',
+ 'eval-and-compile', 'eval-when-compile', 'flet', 'ignore-errors',
+ 'incf', 'labels', 'lambda', 'letrec', 'lexical-let', 'lexical-let*',
+ 'loop', 'multiple-value-bind', 'multiple-value-setq', 'noreturn',
+ 'oref', 'oref-default', 'oset', 'oset-default', 'pcase',
+ 'pcase-defmacro', 'pcase-dolist', 'pcase-exhaustive', 'pcase-let',
+ 'pcase-let*', 'pop', 'psetf', 'psetq', 'push', 'pushnew', 'remf',
+ 'return', 'rotatef', 'rx', 'save-match-data', 'save-selected-window',
+ 'save-window-excursion', 'setf', 'setq-local', 'shiftf',
+ 'track-mouse', 'typecase', 'unless', 'use-package', 'when',
+ 'while-no-input', 'with-case-table', 'with-category-table',
+ 'with-coding-priority', 'with-current-buffer', 'with-demoted-errors',
+ 'with-eval-after-load', 'with-file-modes', 'with-local-quit',
+ 'with-output-to-string', 'with-output-to-temp-buffer',
+ 'with-parsed-tramp-file-name', 'with-selected-frame',
+ 'with-selected-window', 'with-silent-modifications', 'with-slots',
+ 'with-syntax-table', 'with-temp-buffer', 'with-temp-file',
+ 'with-temp-message', 'with-timeout', 'with-tramp-connection-property',
+ 'with-tramp-file-property', 'with-tramp-progress-reporter',
+ 'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv',
+ 'return-from',
+ }
+
+    # special forms; re-classified as Keyword by get_tokens_unprocessed()
+    special_forms = {
+        'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar',
+        'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1',
+        'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion',
+        'save-restriction', 'setq', 'setq-default', 'subr-arity',
+        'unwind-protect', 'while',
+    }
+
+ builtin_function = {
+ '%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=',
+ 'Snarf-documentation', 'abort-recursive-edit', 'abs',
+ 'accept-process-output', 'access-file', 'accessible-keymaps', 'acos',
+ 'active-minibuffer-window', 'add-face-text-property',
+ 'add-name-to-file', 'add-text-properties', 'all-completions',
+ 'append', 'apply', 'apropos-internal', 'aref', 'arrayp', 'aset',
+ 'ash', 'asin', 'assoc', 'assoc-string', 'assq', 'atan', 'atom',
+ 'autoload', 'autoload-do-load', 'backtrace', 'backtrace--locals',
+ 'backtrace-debug', 'backtrace-eval', 'backtrace-frame',
+ 'backward-char', 'backward-prefix-chars', 'barf-if-buffer-read-only',
+ 'base64-decode-region', 'base64-decode-string',
+ 'base64-encode-region', 'base64-encode-string', 'beginning-of-line',
+ 'bidi-find-overridden-directionality', 'bidi-resolved-levels',
+ 'bitmap-spec-p', 'bobp', 'bolp', 'bool-vector',
+ 'bool-vector-count-consecutive', 'bool-vector-count-population',
+ 'bool-vector-exclusive-or', 'bool-vector-intersection',
+ 'bool-vector-not', 'bool-vector-p', 'bool-vector-set-difference',
+ 'bool-vector-subsetp', 'bool-vector-union', 'boundp',
+ 'buffer-base-buffer', 'buffer-chars-modified-tick',
+ 'buffer-enable-undo', 'buffer-file-name', 'buffer-has-markers-at',
+ 'buffer-list', 'buffer-live-p', 'buffer-local-value',
+ 'buffer-local-variables', 'buffer-modified-p', 'buffer-modified-tick',
+ 'buffer-name', 'buffer-size', 'buffer-string', 'buffer-substring',
+ 'buffer-substring-no-properties', 'buffer-swap-text', 'bufferp',
+ 'bury-buffer-internal', 'byte-code', 'byte-code-function-p',
+ 'byte-to-position', 'byte-to-string', 'byteorder',
+ 'call-interactively', 'call-last-kbd-macro', 'call-process',
+ 'call-process-region', 'cancel-kbd-macro-events', 'capitalize',
+ 'capitalize-region', 'capitalize-word', 'car', 'car-less-than-car',
+ 'car-safe', 'case-table-p', 'category-docstring',
+ 'category-set-mnemonics', 'category-table', 'category-table-p',
+ 'ccl-execute', 'ccl-execute-on-string', 'ccl-program-p', 'cdr',
+ 'cdr-safe', 'ceiling', 'char-after', 'char-before',
+ 'char-category-set', 'char-charset', 'char-equal', 'char-or-string-p',
+ 'char-resolve-modifiers', 'char-syntax', 'char-table-extra-slot',
+ 'char-table-p', 'char-table-parent', 'char-table-range',
+ 'char-table-subtype', 'char-to-string', 'char-width', 'characterp',
+ 'charset-after', 'charset-id-internal', 'charset-plist',
+ 'charset-priority-list', 'charsetp', 'check-coding-system',
+ 'check-coding-systems-region', 'clear-buffer-auto-save-failure',
+ 'clear-charset-maps', 'clear-face-cache', 'clear-font-cache',
+ 'clear-image-cache', 'clear-string', 'clear-this-command-keys',
+ 'close-font', 'clrhash', 'coding-system-aliases',
+ 'coding-system-base', 'coding-system-eol-type', 'coding-system-p',
+ 'coding-system-plist', 'coding-system-priority-list',
+ 'coding-system-put', 'color-distance', 'color-gray-p',
+ 'color-supported-p', 'combine-after-change-execute',
+ 'command-error-default-function', 'command-remapping', 'commandp',
+ 'compare-buffer-substrings', 'compare-strings',
+ 'compare-window-configurations', 'completing-read',
+ 'compose-region-internal', 'compose-string-internal',
+ 'composition-get-gstring', 'compute-motion', 'concat', 'cons',
+ 'consp', 'constrain-to-field', 'continue-process',
+ 'controlling-tty-p', 'coordinates-in-window-p', 'copy-alist',
+ 'copy-category-table', 'copy-file', 'copy-hash-table', 'copy-keymap',
+ 'copy-marker', 'copy-sequence', 'copy-syntax-table', 'copysign',
+ 'cos', 'current-active-maps', 'current-bidi-paragraph-direction',
+ 'current-buffer', 'current-case-table', 'current-column',
+ 'current-global-map', 'current-idle-time', 'current-indentation',
+ 'current-input-mode', 'current-local-map', 'current-message',
+ 'current-minor-mode-maps', 'current-time', 'current-time-string',
+ 'current-time-zone', 'current-window-configuration',
+ 'cygwin-convert-file-name-from-windows',
+ 'cygwin-convert-file-name-to-windows', 'daemon-initialized',
+ 'daemonp', 'dbus--init-bus', 'dbus-get-unique-name',
+ 'dbus-message-internal', 'debug-timer-check', 'declare-equiv-charset',
+ 'decode-big5-char', 'decode-char', 'decode-coding-region',
+ 'decode-coding-string', 'decode-sjis-char', 'decode-time',
+ 'default-boundp', 'default-file-modes', 'default-printer-name',
+ 'default-toplevel-value', 'default-value', 'define-category',
+ 'define-charset-alias', 'define-charset-internal',
+ 'define-coding-system-alias', 'define-coding-system-internal',
+ 'define-fringe-bitmap', 'define-hash-table-test', 'define-key',
+ 'define-prefix-command', 'delete',
+ 'delete-all-overlays', 'delete-and-extract-region', 'delete-char',
+ 'delete-directory-internal', 'delete-field', 'delete-file',
+ 'delete-frame', 'delete-other-windows-internal', 'delete-overlay',
+ 'delete-process', 'delete-region', 'delete-terminal',
+ 'delete-window-internal', 'delq', 'describe-buffer-bindings',
+ 'describe-vector', 'destroy-fringe-bitmap', 'detect-coding-region',
+ 'detect-coding-string', 'ding', 'directory-file-name',
+ 'directory-files', 'directory-files-and-attributes', 'discard-input',
+ 'display-supports-face-attributes-p', 'do-auto-save', 'documentation',
+ 'documentation-property', 'downcase', 'downcase-region',
+ 'downcase-word', 'draw-string', 'dump-colors', 'dump-emacs',
+ 'dump-face', 'dump-frame-glyph-matrix', 'dump-glyph-matrix',
+ 'dump-glyph-row', 'dump-redisplay-history', 'dump-tool-bar-row',
+ 'elt', 'emacs-pid', 'encode-big5-char', 'encode-char',
+ 'encode-coding-region', 'encode-coding-string', 'encode-sjis-char',
+ 'encode-time', 'end-kbd-macro', 'end-of-line', 'eobp', 'eolp', 'eq',
+ 'eql', 'equal', 'equal-including-properties', 'erase-buffer',
+ 'error-message-string', 'eval', 'eval-buffer', 'eval-region',
+ 'event-convert-list', 'execute-kbd-macro', 'exit-recursive-edit',
+ 'exp', 'expand-file-name', 'expt', 'external-debugging-output',
+ 'face-attribute-relative-p', 'face-attributes-as-vector', 'face-font',
+ 'fboundp', 'fceiling', 'fetch-bytecode', 'ffloor',
+ 'field-beginning', 'field-end', 'field-string',
+ 'field-string-no-properties', 'file-accessible-directory-p',
+ 'file-acl', 'file-attributes', 'file-attributes-lessp',
+ 'file-directory-p', 'file-executable-p', 'file-exists-p',
+ 'file-locked-p', 'file-modes', 'file-name-absolute-p',
+ 'file-name-all-completions', 'file-name-as-directory',
+ 'file-name-completion', 'file-name-directory',
+ 'file-name-nondirectory', 'file-newer-than-file-p', 'file-readable-p',
+ 'file-regular-p', 'file-selinux-context', 'file-symlink-p',
+ 'file-system-info', 'file-system-info', 'file-writable-p',
+ 'fillarray', 'find-charset-region', 'find-charset-string',
+ 'find-coding-systems-region-internal', 'find-composition-internal',
+ 'find-file-name-handler', 'find-font', 'find-operation-coding-system',
+ 'float', 'float-time', 'floatp', 'floor', 'fmakunbound',
+ 'following-char', 'font-at', 'font-drive-otf', 'font-face-attributes',
+ 'font-family-list', 'font-get', 'font-get-glyphs',
+ 'font-get-system-font', 'font-get-system-normal-font', 'font-info',
+ 'font-match-p', 'font-otf-alternates', 'font-put',
+ 'font-shape-gstring', 'font-spec', 'font-variation-glyphs',
+ 'font-xlfd-name', 'fontp', 'fontset-font', 'fontset-info',
+ 'fontset-list', 'fontset-list-all', 'force-mode-line-update',
+ 'force-window-update', 'format', 'format-mode-line',
+ 'format-network-address', 'format-time-string', 'forward-char',
+ 'forward-comment', 'forward-line', 'forward-word',
+ 'frame-border-width', 'frame-bottom-divider-width',
+ 'frame-can-run-window-configuration-change-hook', 'frame-char-height',
+ 'frame-char-width', 'frame-face-alist', 'frame-first-window',
+ 'frame-focus', 'frame-font-cache', 'frame-fringe-width', 'frame-list',
+ 'frame-live-p', 'frame-or-buffer-changed-p', 'frame-parameter',
+ 'frame-parameters', 'frame-pixel-height', 'frame-pixel-width',
+ 'frame-pointer-visible-p', 'frame-right-divider-width',
+ 'frame-root-window', 'frame-scroll-bar-height',
+ 'frame-scroll-bar-width', 'frame-selected-window', 'frame-terminal',
+ 'frame-text-cols', 'frame-text-height', 'frame-text-lines',
+ 'frame-text-width', 'frame-total-cols', 'frame-total-lines',
+ 'frame-visible-p', 'framep', 'frexp', 'fringe-bitmaps-at-pos',
+ 'fround', 'fset', 'ftruncate', 'funcall', 'funcall-interactively',
+ 'function-equal', 'functionp', 'gap-position', 'gap-size',
+ 'garbage-collect', 'gc-status', 'generate-new-buffer-name', 'get',
+ 'get-buffer', 'get-buffer-create', 'get-buffer-process',
+ 'get-buffer-window', 'get-byte', 'get-char-property',
+ 'get-char-property-and-overlay', 'get-file-buffer', 'get-file-char',
+ 'get-internal-run-time', 'get-load-suffixes', 'get-pos-property',
+ 'get-process', 'get-screen-color', 'get-text-property',
+ 'get-unicode-property-internal', 'get-unused-category',
+ 'get-unused-iso-final-char', 'getenv-internal', 'gethash',
+ 'gfile-add-watch', 'gfile-rm-watch', 'global-key-binding',
+ 'gnutls-available-p', 'gnutls-boot', 'gnutls-bye', 'gnutls-deinit',
+ 'gnutls-error-fatalp', 'gnutls-error-string', 'gnutls-errorp',
+ 'gnutls-get-initstage', 'gnutls-peer-status',
+ 'gnutls-peer-status-warning-describe', 'goto-char', 'gpm-mouse-start',
+ 'gpm-mouse-stop', 'group-gid', 'group-real-gid',
+ 'handle-save-session', 'handle-switch-frame', 'hash-table-count',
+ 'hash-table-p', 'hash-table-rehash-size',
+ 'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
+ 'hash-table-weakness', 'iconify-frame', 'identity', 'image-flush',
+ 'image-mask-p', 'image-metadata', 'image-size', 'imagemagick-types',
+ 'imagep', 'indent-to', 'indirect-function', 'indirect-variable',
+ 'init-image-library', 'inotify-add-watch', 'inotify-rm-watch',
+ 'input-pending-p', 'insert', 'insert-and-inherit',
+ 'insert-before-markers', 'insert-before-markers-and-inherit',
+ 'insert-buffer-substring', 'insert-byte', 'insert-char',
+ 'insert-file-contents', 'insert-startup-screen', 'int86',
+ 'integer-or-marker-p', 'integerp', 'interactive-form', 'intern',
+ 'intern-soft', 'internal--track-mouse', 'internal-char-font',
+ 'internal-complete-buffer', 'internal-copy-lisp-face',
+ 'internal-default-process-filter',
+ 'internal-default-process-sentinel', 'internal-describe-syntax-value',
+ 'internal-event-symbol-parse-modifiers',
+ 'internal-face-x-get-resource', 'internal-get-lisp-face-attribute',
+ 'internal-lisp-face-attribute-values', 'internal-lisp-face-empty-p',
+ 'internal-lisp-face-equal-p', 'internal-lisp-face-p',
+ 'internal-make-lisp-face', 'internal-make-var-non-special',
+ 'internal-merge-in-global-face',
+ 'internal-set-alternative-font-family-alist',
+ 'internal-set-alternative-font-registry-alist',
+ 'internal-set-font-selection-order',
+ 'internal-set-lisp-face-attribute',
+ 'internal-set-lisp-face-attribute-from-resource',
+ 'internal-show-cursor', 'internal-show-cursor-p', 'interrupt-process',
+ 'invisible-p', 'invocation-directory', 'invocation-name', 'isnan',
+ 'iso-charset', 'key-binding', 'key-description',
+ 'keyboard-coding-system', 'keymap-parent', 'keymap-prompt', 'keymapp',
+ 'keywordp', 'kill-all-local-variables', 'kill-buffer', 'kill-emacs',
+ 'kill-local-variable', 'kill-process', 'last-nonminibuffer-frame',
+ 'lax-plist-get', 'lax-plist-put', 'ldexp', 'length',
+ 'libxml-parse-html-region', 'libxml-parse-xml-region',
+ 'line-beginning-position', 'line-end-position', 'line-pixel-height',
+ 'list', 'list-fonts', 'list-system-processes', 'listp', 'load',
+ 'load-average', 'local-key-binding', 'local-variable-if-set-p',
+ 'local-variable-p', 'locale-info', 'locate-file-internal',
+ 'lock-buffer', 'log', 'logand', 'logb', 'logior', 'lognot', 'logxor',
+ 'looking-at', 'lookup-image', 'lookup-image-map', 'lookup-key',
+ 'lower-frame', 'lsh', 'macroexpand', 'make-bool-vector',
+ 'make-byte-code', 'make-category-set', 'make-category-table',
+ 'make-char', 'make-char-table', 'make-directory-internal',
+ 'make-frame-invisible', 'make-frame-visible', 'make-hash-table',
+ 'make-indirect-buffer', 'make-keymap', 'make-list',
+ 'make-local-variable', 'make-marker', 'make-network-process',
+ 'make-overlay', 'make-serial-process', 'make-sparse-keymap',
+ 'make-string', 'make-symbol', 'make-symbolic-link', 'make-temp-name',
+ 'make-terminal-frame', 'make-variable-buffer-local',
+ 'make-variable-frame-local', 'make-vector', 'makunbound',
+ 'map-char-table', 'map-charset-chars', 'map-keymap',
+ 'map-keymap-internal', 'mapatoms', 'mapc', 'mapcar', 'mapconcat',
+ 'maphash', 'mark-marker', 'marker-buffer', 'marker-insertion-type',
+ 'marker-position', 'markerp', 'match-beginning', 'match-data',
+ 'match-end', 'matching-paren', 'max', 'max-char', 'md5', 'member',
+ 'memory-info', 'memory-limit', 'memory-use-counts', 'memq', 'memql',
+ 'menu-bar-menu-at-x-y', 'menu-or-popup-active-p',
+ 'menu-or-popup-active-p', 'merge-face-attribute', 'message',
+ 'message-box', 'message-or-box', 'min',
+ 'minibuffer-completion-contents', 'minibuffer-contents',
+ 'minibuffer-contents-no-properties', 'minibuffer-depth',
+ 'minibuffer-prompt', 'minibuffer-prompt-end',
+ 'minibuffer-selected-window', 'minibuffer-window', 'minibufferp',
+ 'minor-mode-key-binding', 'mod', 'modify-category-entry',
+ 'modify-frame-parameters', 'modify-syntax-entry',
+ 'mouse-pixel-position', 'mouse-position', 'move-overlay',
+ 'move-point-visually', 'move-to-column', 'move-to-window-line',
+ 'msdos-downcase-filename', 'msdos-long-file-names', 'msdos-memget',
+ 'msdos-memput', 'msdos-mouse-disable', 'msdos-mouse-enable',
+ 'msdos-mouse-init', 'msdos-mouse-p', 'msdos-remember-default-colors',
+ 'msdos-set-keyboard', 'msdos-set-mouse-buttons',
+ 'multibyte-char-to-unibyte', 'multibyte-string-p', 'narrow-to-region',
+ 'natnump', 'nconc', 'network-interface-info',
+ 'network-interface-list', 'new-fontset', 'newline-cache-check',
+ 'next-char-property-change', 'next-frame', 'next-overlay-change',
+ 'next-property-change', 'next-read-file-uses-dialog-p',
+ 'next-single-char-property-change', 'next-single-property-change',
+ 'next-window', 'nlistp', 'nreverse', 'nth', 'nthcdr', 'null',
+ 'number-or-marker-p', 'number-to-string', 'numberp',
+ 'open-dribble-file', 'open-font', 'open-termscript',
+ 'optimize-char-table', 'other-buffer', 'other-window-for-scrolling',
+ 'overlay-buffer', 'overlay-end', 'overlay-get', 'overlay-lists',
+ 'overlay-properties', 'overlay-put', 'overlay-recenter',
+ 'overlay-start', 'overlayp', 'overlays-at', 'overlays-in',
+ 'parse-partial-sexp', 'play-sound-internal', 'plist-get',
+ 'plist-member', 'plist-put', 'point', 'point-marker', 'point-max',
+ 'point-max-marker', 'point-min', 'point-min-marker',
+ 'pos-visible-in-window-p', 'position-bytes', 'posix-looking-at',
+ 'posix-search-backward', 'posix-search-forward', 'posix-string-match',
+ 'posn-at-point', 'posn-at-x-y', 'preceding-char',
+ 'prefix-numeric-value', 'previous-char-property-change',
+ 'previous-frame', 'previous-overlay-change',
+ 'previous-property-change', 'previous-single-char-property-change',
+ 'previous-single-property-change', 'previous-window', 'prin1',
+ 'prin1-to-string', 'princ', 'print', 'process-attributes',
+ 'process-buffer', 'process-coding-system', 'process-command',
+ 'process-connection', 'process-contact', 'process-datagram-address',
+ 'process-exit-status', 'process-filter', 'process-filter-multibyte-p',
+ 'process-id', 'process-inherit-coding-system-flag', 'process-list',
+ 'process-mark', 'process-name', 'process-plist',
+ 'process-query-on-exit-flag', 'process-running-child-p',
+ 'process-send-eof', 'process-send-region', 'process-send-string',
+ 'process-sentinel', 'process-status', 'process-tty-name',
+ 'process-type', 'processp', 'profiler-cpu-log',
+ 'profiler-cpu-running-p', 'profiler-cpu-start', 'profiler-cpu-stop',
+ 'profiler-memory-log', 'profiler-memory-running-p',
+ 'profiler-memory-start', 'profiler-memory-stop', 'propertize',
+ 'purecopy', 'put', 'put-text-property',
+ 'put-unicode-property-internal', 'puthash', 'query-font',
+ 'query-fontset', 'quit-process', 'raise-frame', 'random', 'rassoc',
+ 'rassq', 're-search-backward', 're-search-forward', 'read',
+ 'read-buffer', 'read-char', 'read-char-exclusive',
+ 'read-coding-system', 'read-command', 'read-event',
+ 'read-from-minibuffer', 'read-from-string', 'read-function',
+ 'read-key-sequence', 'read-key-sequence-vector',
+ 'read-no-blanks-input', 'read-non-nil-coding-system', 'read-string',
+ 'read-variable', 'recent-auto-save-p', 'recent-doskeys',
+ 'recent-keys', 'recenter', 'recursion-depth', 'recursive-edit',
+ 'redirect-debugging-output', 'redirect-frame-focus', 'redisplay',
+ 'redraw-display', 'redraw-frame', 'regexp-quote', 'region-beginning',
+ 'region-end', 'register-ccl-program', 'register-code-conversion-map',
+ 'remhash', 'remove-list-of-text-properties', 'remove-text-properties',
+ 'rename-buffer', 'rename-file', 'replace-match',
+ 'reset-this-command-lengths', 'resize-mini-window-internal',
+ 'restore-buffer-modified-p', 'resume-tty', 'reverse', 'round',
+ 'run-hook-with-args', 'run-hook-with-args-until-failure',
+ 'run-hook-with-args-until-success', 'run-hook-wrapped', 'run-hooks',
+ 'run-window-configuration-change-hook', 'run-window-scroll-functions',
+ 'safe-length', 'scan-lists', 'scan-sexps', 'scroll-down',
+ 'scroll-left', 'scroll-other-window', 'scroll-right', 'scroll-up',
+ 'search-backward', 'search-forward', 'secure-hash', 'select-frame',
+ 'select-window', 'selected-frame', 'selected-window',
+ 'self-insert-command', 'send-string-to-terminal', 'sequencep',
+ 'serial-process-configure', 'set', 'set-buffer',
+ 'set-buffer-auto-saved', 'set-buffer-major-mode',
+ 'set-buffer-modified-p', 'set-buffer-multibyte', 'set-case-table',
+ 'set-category-table', 'set-char-table-extra-slot',
+ 'set-char-table-parent', 'set-char-table-range', 'set-charset-plist',
+ 'set-charset-priority', 'set-coding-system-priority',
+ 'set-cursor-size', 'set-default', 'set-default-file-modes',
+ 'set-default-toplevel-value', 'set-file-acl', 'set-file-modes',
+ 'set-file-selinux-context', 'set-file-times', 'set-fontset-font',
+ 'set-frame-height', 'set-frame-position', 'set-frame-selected-window',
+ 'set-frame-size', 'set-frame-width', 'set-fringe-bitmap-face',
+ 'set-input-interrupt-mode', 'set-input-meta-mode', 'set-input-mode',
+ 'set-keyboard-coding-system-internal', 'set-keymap-parent',
+ 'set-marker', 'set-marker-insertion-type', 'set-match-data',
+ 'set-message-beep', 'set-minibuffer-window',
+ 'set-mouse-pixel-position', 'set-mouse-position',
+ 'set-network-process-option', 'set-output-flow-control',
+ 'set-process-buffer', 'set-process-coding-system',
+ 'set-process-datagram-address', 'set-process-filter',
+ 'set-process-filter-multibyte',
+ 'set-process-inherit-coding-system-flag', 'set-process-plist',
+ 'set-process-query-on-exit-flag', 'set-process-sentinel',
+ 'set-process-window-size', 'set-quit-char',
+ 'set-safe-terminal-coding-system-internal', 'set-screen-color',
+ 'set-standard-case-table', 'set-syntax-table',
+ 'set-terminal-coding-system-internal', 'set-terminal-local-value',
+ 'set-terminal-parameter', 'set-text-properties', 'set-time-zone-rule',
+ 'set-visited-file-modtime', 'set-window-buffer',
+ 'set-window-combination-limit', 'set-window-configuration',
+ 'set-window-dedicated-p', 'set-window-display-table',
+ 'set-window-fringes', 'set-window-hscroll', 'set-window-margins',
+ 'set-window-new-normal', 'set-window-new-pixel',
+ 'set-window-new-total', 'set-window-next-buffers',
+ 'set-window-parameter', 'set-window-point', 'set-window-prev-buffers',
+ 'set-window-redisplay-end-trigger', 'set-window-scroll-bars',
+ 'set-window-start', 'set-window-vscroll', 'setcar', 'setcdr',
+ 'setplist', 'show-face-resources', 'signal', 'signal-process', 'sin',
+ 'single-key-description', 'skip-chars-backward', 'skip-chars-forward',
+ 'skip-syntax-backward', 'skip-syntax-forward', 'sleep-for', 'sort',
+ 'sort-charsets', 'special-variable-p', 'split-char',
+ 'split-window-internal', 'sqrt', 'standard-case-table',
+ 'standard-category-table', 'standard-syntax-table', 'start-kbd-macro',
+ 'start-process', 'stop-process', 'store-kbd-macro-event', 'string',
+ 'string=', 'string<', 'string>', 'string-as-multibyte',
+ 'string-as-unibyte', 'string-bytes', 'string-collate-equalp',
+ 'string-collate-lessp', 'string-equal', 'string-greaterp',
+ 'string-lessp', 'string-make-multibyte', 'string-make-unibyte',
+ 'string-match', 'string-to-char', 'string-to-multibyte',
+ 'string-to-number', 'string-to-syntax', 'string-to-unibyte',
+ 'string-width', 'stringp', 'subr-name', 'subrp',
+ 'subst-char-in-region', 'substitute-command-keys',
+ 'substitute-in-file-name', 'substring', 'substring-no-properties',
+ 'suspend-emacs', 'suspend-tty', 'suspicious-object', 'sxhash',
+ 'symbol-function', 'symbol-name', 'symbol-plist', 'symbol-value',
+ 'symbolp', 'syntax-table', 'syntax-table-p', 'system-groups',
+ 'system-move-file-to-trash', 'system-name', 'system-users', 'tan',
+ 'terminal-coding-system', 'terminal-list', 'terminal-live-p',
+ 'terminal-local-value', 'terminal-name', 'terminal-parameter',
+ 'terminal-parameters', 'terpri', 'test-completion',
+ 'text-char-description', 'text-properties-at', 'text-property-any',
+ 'text-property-not-all', 'this-command-keys',
+ 'this-command-keys-vector', 'this-single-command-keys',
+ 'this-single-command-raw-keys', 'time-add', 'time-less-p',
+ 'time-subtract', 'tool-bar-get-system-style', 'tool-bar-height',
+ 'tool-bar-pixel-width', 'top-level', 'trace-redisplay',
+ 'trace-to-stderr', 'translate-region-internal', 'transpose-regions',
+ 'truncate', 'try-completion', 'tty-display-color-cells',
+ 'tty-display-color-p', 'tty-no-underline',
+ 'tty-suppress-bold-inverse-default-colors', 'tty-top-frame',
+ 'tty-type', 'type-of', 'undo-boundary', 'unencodable-char-position',
+ 'unhandled-file-name-directory', 'unibyte-char-to-multibyte',
+ 'unibyte-string', 'unicode-property-table-internal', 'unify-charset',
+ 'unintern', 'unix-sync', 'unlock-buffer', 'upcase', 'upcase-initials',
+ 'upcase-initials-region', 'upcase-region', 'upcase-word',
+ 'use-global-map', 'use-local-map', 'user-full-name',
+ 'user-login-name', 'user-real-login-name', 'user-real-uid',
+ 'user-uid', 'variable-binding-locus', 'vconcat', 'vector',
+ 'vector-or-char-table-p', 'vectorp', 'verify-visited-file-modtime',
+ 'vertical-motion', 'visible-frame-list', 'visited-file-modtime',
+ 'w16-get-clipboard-data', 'w16-selection-exists-p',
+ 'w16-set-clipboard-data', 'w32-battery-status',
+ 'w32-default-color-map', 'w32-define-rgb-color',
+ 'w32-display-monitor-attributes-list', 'w32-frame-menu-bar-size',
+ 'w32-frame-rect', 'w32-get-clipboard-data',
+ 'w32-get-codepage-charset', 'w32-get-console-codepage',
+ 'w32-get-console-output-codepage', 'w32-get-current-locale-id',
+ 'w32-get-default-locale-id', 'w32-get-keyboard-layout',
+ 'w32-get-locale-info', 'w32-get-valid-codepages',
+ 'w32-get-valid-keyboard-layouts', 'w32-get-valid-locale-ids',
+ 'w32-has-winsock', 'w32-long-file-name', 'w32-reconstruct-hot-key',
+ 'w32-register-hot-key', 'w32-registered-hot-keys',
+ 'w32-selection-exists-p', 'w32-send-sys-command',
+ 'w32-set-clipboard-data', 'w32-set-console-codepage',
+ 'w32-set-console-output-codepage', 'w32-set-current-locale',
+ 'w32-set-keyboard-layout', 'w32-set-process-priority',
+ 'w32-shell-execute', 'w32-short-file-name', 'w32-toggle-lock-key',
+ 'w32-unload-winsock', 'w32-unregister-hot-key', 'w32-window-exists-p',
+ 'w32notify-add-watch', 'w32notify-rm-watch',
+ 'waiting-for-user-input-p', 'where-is-internal', 'widen',
+ 'widget-apply', 'widget-get', 'widget-put',
+ 'window-absolute-pixel-edges', 'window-at', 'window-body-height',
+ 'window-body-width', 'window-bottom-divider-width', 'window-buffer',
+ 'window-combination-limit', 'window-configuration-frame',
+ 'window-configuration-p', 'window-dedicated-p',
+ 'window-display-table', 'window-edges', 'window-end', 'window-frame',
+ 'window-fringes', 'window-header-line-height', 'window-hscroll',
+ 'window-inside-absolute-pixel-edges', 'window-inside-edges',
+ 'window-inside-pixel-edges', 'window-left-child',
+ 'window-left-column', 'window-line-height', 'window-list',
+ 'window-list-1', 'window-live-p', 'window-margins',
+ 'window-minibuffer-p', 'window-mode-line-height', 'window-new-normal',
+ 'window-new-pixel', 'window-new-total', 'window-next-buffers',
+ 'window-next-sibling', 'window-normal-size', 'window-old-point',
+ 'window-parameter', 'window-parameters', 'window-parent',
+ 'window-pixel-edges', 'window-pixel-height', 'window-pixel-left',
+ 'window-pixel-top', 'window-pixel-width', 'window-point',
+ 'window-prev-buffers', 'window-prev-sibling',
+ 'window-redisplay-end-trigger', 'window-resize-apply',
+ 'window-resize-apply-total', 'window-right-divider-width',
+ 'window-scroll-bar-height', 'window-scroll-bar-width',
+ 'window-scroll-bars', 'window-start', 'window-system',
+ 'window-text-height', 'window-text-pixel-size', 'window-text-width',
+ 'window-top-child', 'window-top-line', 'window-total-height',
+ 'window-total-width', 'window-use-time', 'window-valid-p',
+ 'window-vscroll', 'windowp', 'write-char', 'write-region',
+ 'x-backspace-delete-keys-p', 'x-change-window-property',
+ 'x-change-window-property', 'x-close-connection',
+ 'x-close-connection', 'x-create-frame', 'x-create-frame',
+ 'x-delete-window-property', 'x-delete-window-property',
+ 'x-disown-selection-internal', 'x-display-backing-store',
+ 'x-display-backing-store', 'x-display-color-cells',
+ 'x-display-color-cells', 'x-display-grayscale-p',
+ 'x-display-grayscale-p', 'x-display-list', 'x-display-list',
+ 'x-display-mm-height', 'x-display-mm-height', 'x-display-mm-width',
+ 'x-display-mm-width', 'x-display-monitor-attributes-list',
+ 'x-display-pixel-height', 'x-display-pixel-height',
+ 'x-display-pixel-width', 'x-display-pixel-width', 'x-display-planes',
+ 'x-display-planes', 'x-display-save-under', 'x-display-save-under',
+ 'x-display-screens', 'x-display-screens', 'x-display-visual-class',
+ 'x-display-visual-class', 'x-family-fonts', 'x-file-dialog',
+ 'x-file-dialog', 'x-file-dialog', 'x-focus-frame', 'x-frame-geometry',
+ 'x-frame-geometry', 'x-get-atom-name', 'x-get-resource',
+ 'x-get-selection-internal', 'x-hide-tip', 'x-hide-tip',
+ 'x-list-fonts', 'x-load-color-file', 'x-menu-bar-open-internal',
+ 'x-menu-bar-open-internal', 'x-open-connection', 'x-open-connection',
+ 'x-own-selection-internal', 'x-parse-geometry', 'x-popup-dialog',
+ 'x-popup-menu', 'x-register-dnd-atom', 'x-select-font',
+ 'x-select-font', 'x-selection-exists-p', 'x-selection-owner-p',
+ 'x-send-client-message', 'x-server-max-request-size',
+ 'x-server-max-request-size', 'x-server-vendor', 'x-server-vendor',
+ 'x-server-version', 'x-server-version', 'x-show-tip', 'x-show-tip',
+ 'x-synchronize', 'x-synchronize', 'x-uses-old-gtk-dialog',
+ 'x-window-property', 'x-window-property', 'x-wm-set-size-hint',
+ 'xw-color-defined-p', 'xw-color-defined-p', 'xw-color-values',
+ 'xw-color-values', 'xw-display-color-p', 'xw-display-color-p',
+ 'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region',
+ 'forward-point',
+ }
+
+    # builtins given extra emphasis (Name.Builtin) by get_tokens_unprocessed()
+    builtin_function_highlighted = {
+        'defvaralias', 'provide', 'require',
+        'with-no-warnings', 'define-widget', 'with-electric-help',
+        'throw', 'defalias', 'featurep'
+    }
+
+    # CL-style lambda-list markers; re-classified as Keyword.Pseudo
+    lambda_list_keywords = {
+        '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
+        '&rest', '&whole',
+    }
+
+ error_keywords = {
+ 'cl-assert', 'cl-check-type', 'error', 'signal',
+ 'user-error', 'warn',
+ }
+
+    def get_tokens_unprocessed(self, text):
+        """Post-process the raw regex token stream.
+
+        Symbols initially lexed as Name.Variable are reclassified into a
+        more specific token type when they belong to one of the known
+        symbol sets.  The membership checks run in a fixed order, so when
+        a symbol appears in more than one set the earlier check wins.
+        """
+        stack = ['root']
+        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
+            if token is Name.Variable:
+                if value in EmacsLispLexer.builtin_function:
+                    yield index, Name.Function, value
+                    continue
+                if value in EmacsLispLexer.special_forms:
+                    yield index, Keyword, value
+                    continue
+                if value in EmacsLispLexer.error_keywords:
+                    yield index, Name.Exception, value
+                    continue
+                if value in EmacsLispLexer.builtin_function_highlighted:
+                    yield index, Name.Builtin, value
+                    continue
+                if value in EmacsLispLexer.macros:
+                    yield index, Name.Builtin, value
+                    continue
+                if value in EmacsLispLexer.lambda_list_keywords:
+                    yield index, Keyword.Pseudo, value
+                    continue
+            # Everything else passes through unchanged.
+            yield index, token, value
+
+    tokens = {
+        # 'root' immediately delegates to 'body'; each '(' pushes another
+        # 'body' state and each ')' pops one, tracking paren nesting.
+        'root': [
+            default('body'),
+        ],
+        'body': [
+            # whitespace
+            (r'\s+', Whitespace),
+
+            # single-line comment
+            (r';.*$', Comment.Single),
+
+            # strings and characters
+            (r'"', String, 'string'),
+            (r'\?([^\\]|\\.)', String.Char),
+            # quoting
+            (r":" + symbol, Name.Builtin),
+            (r"::" + symbol, String.Symbol),
+            (r"'" + symbol, String.Symbol),
+            (r"'", Operator),
+            (r"`", Operator),
+
+            # decimal numbers
+            (r'[-+]?\d+\.?' + terminated, Number.Integer),
+            (r'[-+]?\d+/\d+' + terminated, Number),
+            (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
+             terminated, Number.Float),
+
+            # vectors
+            (r'\[|\]', Punctuation),
+
+            # uninterned symbol
+            (r'#:' + symbol, String.Symbol),
+
+            # read syntax for char tables
+            (r'#\^\^?', Operator),
+
+            # function shorthand
+            (r'#\'', Name.Function),
+
+            # binary rational
+            (r'#[bB][+-]?[01]+(/[01]+)?', Number.Bin),
+
+            # octal rational
+            (r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
+
+            # hex rational
+            (r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
+
+            # radix rational
+            (r'#\d+r[+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
+
+            # reference
+            (r'#\d+=', Operator),
+            (r'#\d+#', Operator),
+
+            # special operators that should have been parsed already
+            (r'(,@|,|\.|:)', Operator),
+
+            # special constants
+            (r'(t|nil)' + terminated, Name.Constant),
+
+            # functions and variables; plain symbols become Name.Variable
+            # and may be reclassified later by get_tokens_unprocessed
+            (r'\*' + symbol + r'\*', Name.Variable.Global),
+            (symbol, Name.Variable),
+
+            # parentheses
+            (r'#\(', Operator, 'body'),
+            (r'\(', Punctuation, 'body'),
+            (r'\)', Punctuation, '#pop'),
+        ],
+        'string': [
+            (r'[^"\\`]+', String),
+            # `symbol' quoting inside docstrings/messages
+            (r'`%s\'' % symbol, String.Symbol),
+            (r'`', String),
+            (r'\\.', String),
+            (r'\\\n', String),
+            (r'"', String, '#pop'),
+        ],
+    }
+
+
+class ShenLexer(RegexLexer):
+    """
+    Lexer for Shen source code.
+
+    The regex stage produces a coarse token stream; two generator passes
+    (`_process_symbols`, `_process_declarations`) then refine Literal and
+    Name.Variable tokens based on their position and the tables below.
+
+    .. versionadded:: 2.1
+    """
+    name = 'Shen'
+    url = 'http://shenlanguage.org/'
+    aliases = ['shen']
+    filenames = ['*.shen']
+    mimetypes = ['text/x-shen', 'application/x-shen']
+
+    # Top-level defining forms; a form headed by one of these triggers
+    # declaration-specific post-processing (_process_declaration).
+    DECLARATIONS = (
+        'datatype', 'define', 'defmacro', 'defprolog', 'defcc',
+        'synonyms', 'declare', 'package', 'type', 'function',
+    )
+
+    SPECIAL_FORMS = (
+        'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze',
+        'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output',
+        'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p',
+        '@s', '@v',
+    )
+
+    BUILTINS = (
+        '==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address',
+        '<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin',
+        'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd',
+        'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy',
+        'difference', 'element?', 'empty?', 'enable-type-theory',
+        'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external',
+        'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym',
+        'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical',
+        'implementation', 'in', 'include', 'include-all-but', 'inferences',
+        'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill',
+        'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand',
+        'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null',
+        'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port',
+        'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile',
+        'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file',
+        'read-file-as-bytelist', 'read-file-as-string', 'read-from-string',
+        'release', 'remove', 'return', 'reverse', 'run', 'save', 'set',
+        'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput',
+        'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?',
+        'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track',
+        'tuple?', 'undefmacro', 'unify', 'unify!', 'union', 'unprofile',
+        'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?',
+        'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file',
+        'y-or-n?',
+    )
+
+    # Highlighted wherever they appear, not only in head position.
+    BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>')
+
+    # NOTE: update order matters — SPECIAL_FORMS is applied last, so names
+    # occurring in both BUILTINS and SPECIAL_FORMS (e.g. 'set') map to Keyword.
+    MAPPINGS = {s: Keyword for s in DECLARATIONS}
+    MAPPINGS.update((s, Name.Builtin) for s in BUILTINS)
+    MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS)
+
+    valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]'
+    valid_name = '%s+' % valid_symbol_chars
+    symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars
+    variable = r'[A-Z]%s*' % valid_symbol_chars
+
+    tokens = {
+        'string': [
+            (r'"', String, '#pop'),
+            (r'c#\d{1,3};', String.Escape),
+            (r'~[ARS%]', String.Interpol),
+            (r'(?s).', String),
+        ],
+
+        'root': [
+            (r'(?s)\\\*.*?\*\\', Comment.Multiline),  # \* ... *\
+            (r'\\\\.*', Comment.Single),              # \\ ...
+            (r'\s+', Whitespace),
+            (r'_{5,}', Punctuation),
+            (r'={5,}', Punctuation),
+            (r'(;|:=|\||--?>|<--?)', Punctuation),
+            (r'(:-|:|\{|\})', Literal),
+            (r'[+-]*\d*\.\d+(e[+-]?\d+)?', Number.Float),
+            (r'[+-]*\d+', Number.Integer),
+            (r'"', String, 'string'),
+            (variable, Name.Variable),
+            (r'(true|false|<>|\[\])', Keyword.Pseudo),
+            # Plain symbols come out as Literal and are refined later.
+            (symbol_name, Literal),
+            (r'(\[|\]|\(|\))', Punctuation),
+        ],
+    }
+
+    def get_tokens_unprocessed(self, text):
+        # Chain the raw regex stream through the two refinement passes.
+        tokens = RegexLexer.get_tokens_unprocessed(self, text)
+        tokens = self._process_symbols(tokens)
+        tokens = self._process_declarations(tokens)
+        return tokens
+
+    def _relevant(self, token):
+        # True for tokens that carry syntax (i.e. not whitespace/comments).
+        return token not in (Text, Whitespace, Comment.Single, Comment.Multiline)
+
+    def _process_declarations(self, tokens):
+        # Watch for '(' followed by a DECLARATIONS keyword and hand the
+        # rest of that form to _process_declaration.
+        opening_paren = False
+        for index, token, value in tokens:
+            yield index, token, value
+            if self._relevant(token):
+                if opening_paren and token == Keyword and value in self.DECLARATIONS:
+                    declaration = value
+                    yield from self._process_declaration(declaration, tokens)
+                opening_paren = value == '(' and token == Punctuation
+
+    def _process_symbols(self, tokens):
+        # A symbol directly after '(' is in head position: map it via
+        # MAPPINGS or default to Name.Function.
+        opening_paren = False
+        for index, token, value in tokens:
+            if opening_paren and token in (Literal, Name.Variable):
+                token = self.MAPPINGS.get(value, Name.Function)
+            elif token == Literal and value in self.BUILTINS_ANYWHERE:
+                token = Name.Builtin
+            opening_paren = value == '(' and token == Punctuation
+            yield index, token, value
+
+    def _process_declaration(self, declaration, tokens):
+        # Consume leading whitespace/comments up to the declared name.
+        for index, token, value in tokens:
+            if self._relevant(token):
+                break
+            yield index, token, value
+
+        if declaration == 'datatype':
+            # Type names follow ':' inside datatype bodies.
+            prev_was_colon = False
+            token = Keyword.Type if token == Literal else token
+            yield index, token, value
+            for index, token, value in tokens:
+                if prev_was_colon and token == Literal:
+                    token = Keyword.Type
+                yield index, token, value
+                if self._relevant(token):
+                    prev_was_colon = token == Literal and value == ':'
+        elif declaration == 'package':
+            token = Name.Namespace if token == Literal else token
+            yield index, token, value
+        elif declaration == 'define':
+            token = Name.Function if token == Literal else token
+            yield index, token, value
+            # An optional '{ ... }' after the name is a type signature.
+            for index, token, value in tokens:
+                if self._relevant(token):
+                    break
+                yield index, token, value
+            if value == '{' and token == Literal:
+                yield index, Punctuation, value
+                for index, token, value in self._process_signature(tokens):
+                    yield index, token, value
+            else:
+                yield index, token, value
+        else:
+            token = Name.Function if token == Literal else token
+            yield index, token, value
+
+        return
+
+    def _process_signature(self, tokens):
+        # Inside '{ ... }': title-cased symbols are type variables,
+        # everything else is a type name; '}' closes the signature.
+        for index, token, value in tokens:
+            if token == Literal and value == '}':
+                yield index, Punctuation, value
+                return
+            elif token in (Literal, Name.Function):
+                token = Name.Variable if value.istitle() else Keyword.Type
+                yield index, token, value
+
+
+class CPSALexer(RegexLexer):
+    """
+    A CPSA lexer based on the CPSA language as of version 2.2.12
+
+    .. versionadded:: 2.1
+    """
+    name = 'CPSA'
+    aliases = ['cpsa']
+    filenames = ['*.cpsa']
+    mimetypes = []
+
+    # list of known keywords and builtins taken from vim 6.4 scheme.vim
+    # syntax file.
+    _keywords = (
+        'herald', 'vars', 'defmacro', 'include', 'defprotocol', 'defrole',
+        'defskeleton', 'defstrand', 'deflistener', 'non-orig', 'uniq-orig',
+        'pen-non-orig', 'precedes', 'trace', 'send', 'recv', 'name', 'text',
+        'skey', 'akey', 'data', 'mesg',
+    )
+    _builtins = (
+        'cat', 'enc', 'hash', 'privk', 'pubk', 'invk', 'ltk', 'gen', 'exp',
+    )
+
+    # valid names for identifiers
+    # well, names can only not consist fully of numbers
+    # but this should be good enough for now
+    valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
+
+    tokens = {
+        'root': [
+            # the comments - always starting with semicolon
+            # and going to the end of the line
+            (r';.*$', Comment.Single),
+
+            # whitespaces - usually not relevant
+            (r'\s+', Whitespace),
+
+            # numbers
+            (r'-?\d+\.\d+', Number.Float),
+            (r'-?\d+', Number.Integer),
+            # support for uncommon kinds of numbers -
+            # have to figure out what the characters mean
+            # (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
+
+            # strings, symbols and characters
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            (r"'" + valid_name, String.Symbol),
+            (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
+
+            # constants
+            (r'(#t|#f)', Name.Constant),
+
+            # special operators
+            (r"('|#|`|,@|,|\.)", Operator),
+
+            # highlight the keywords
+            (words(_keywords, suffix=r'\b'), Keyword),
+
+            # first variable in a quoted string like
+            # '(this is syntactic sugar)
+            (r"(?<='\()" + valid_name, Name.Variable),
+            (r"(?<=#\()" + valid_name, Name.Variable),
+
+            # highlight the builtins (only in head position, via lookbehind)
+            (words(_builtins, prefix=r'(?<=\()', suffix=r'\b'), Name.Builtin),
+
+            # the remaining functions
+            (r'(?<=\()' + valid_name, Name.Function),
+            # find the remaining variables
+            (valid_name, Name.Variable),
+
+            # the famous parentheses!
+            (r'(\(|\))', Punctuation),
+            (r'(\[|\])', Punctuation),
+        ],
+    }
+
+
+class XtlangLexer(RegexLexer):
+    """An xtlang lexer for the Extempore programming environment.
+
+    This is a mixture of Scheme and xtlang, really. Keyword lists are
+    taken from the Extempore Emacs mode
+    (https://github.com/extemporelang/extempore-emacs-mode)
+
+    .. versionadded:: 2.2
+    """
+    name = 'xtlang'
+    url = 'http://extempore.moso.com.au'
+    aliases = ['extempore']
+    filenames = ['*.xtm']
+    mimetypes = []
+
+    common_keywords = (
+        'lambda', 'define', 'if', 'else', 'cond', 'and',
+        'or', 'let', 'begin', 'set!', 'map', 'for-each',
+    )
+    scheme_keywords = (
+        'do', 'delay', 'quasiquote', 'unquote', 'unquote-splicing', 'eval',
+        'case', 'let*', 'letrec', 'quote',
+    )
+    # Any of these at the head of a form switches into the 'xtlang' state.
+    xtlang_bind_keywords = (
+        'bind-func', 'bind-val', 'bind-lib', 'bind-type', 'bind-alias',
+        'bind-poly', 'bind-dylib', 'bind-lib-func', 'bind-lib-val',
+    )
+    xtlang_keywords = (
+        'letz', 'memzone', 'cast', 'convert', 'dotimes', 'doloop',
+    )
+    common_functions = (
+        '*', '+', '-', '/', '<', '<=', '=', '>', '>=', '%', 'abs', 'acos',
+        'angle', 'append', 'apply', 'asin', 'assoc', 'assq', 'assv',
+        'atan', 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar',
+        'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar',
+        'caddar', 'cadddr', 'caddr', 'cadr', 'car', 'cdaaar',
+        'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
+        'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr',
+        'cddr', 'cdr', 'ceiling', 'cons', 'cos', 'floor', 'length',
+        'list', 'log', 'max', 'member', 'min', 'modulo', 'not',
+        'reverse', 'round', 'sin', 'sqrt', 'substring', 'tan',
+        'println', 'random', 'null?', 'callback', 'now',
+    )
+    scheme_functions = (
+        'call-with-current-continuation', 'call-with-input-file',
+        'call-with-output-file', 'call-with-values', 'call/cc',
+        'char->integer', 'char-alphabetic?', 'char-ci<=?', 'char-ci<?',
+        'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
+        'char-lower-case?', 'char-numeric?', 'char-ready?',
+        'char-upcase', 'char-upper-case?', 'char-whitespace?',
+        'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?',
+        'close-input-port', 'close-output-port', 'complex?',
+        'current-input-port', 'current-output-port', 'denominator',
+        'display', 'dynamic-wind', 'eof-object?', 'eq?', 'equal?',
+        'eqv?', 'even?', 'exact->inexact', 'exact?', 'exp', 'expt',
+        'force', 'gcd', 'imag-part', 'inexact->exact', 'inexact?',
+        'input-port?', 'integer->char', 'integer?',
+        'interaction-environment', 'lcm', 'list->string',
+        'list->vector', 'list-ref', 'list-tail', 'list?', 'load',
+        'magnitude', 'make-polar', 'make-rectangular', 'make-string',
+        'make-vector', 'memq', 'memv', 'negative?', 'newline',
+        'null-environment', 'number->string', 'number?',
+        'numerator', 'odd?', 'open-input-file', 'open-output-file',
+        'output-port?', 'pair?', 'peek-char', 'port?', 'positive?',
+        'procedure?', 'quotient', 'rational?', 'rationalize', 'read',
+        'read-char', 'real-part', 'real?',
+        'remainder', 'scheme-report-environment', 'set-car!', 'set-cdr!',
+        'string', 'string->list', 'string->number', 'string->symbol',
+        'string-append', 'string-ci<=?', 'string-ci<?', 'string-ci=?',
+        'string-ci>=?', 'string-ci>?', 'string-copy', 'string-fill!',
+        'string-length', 'string-ref', 'string-set!', 'string<=?',
+        'string<?', 'string=?', 'string>=?', 'string>?', 'string?',
+        'symbol->string', 'symbol?', 'transcript-off', 'transcript-on',
+        'truncate', 'values', 'vector', 'vector->list', 'vector-fill!',
+        'vector-length', 'vector?',
+        'with-input-from-file', 'with-output-to-file', 'write',
+        'write-char', 'zero?',
+    )
+    xtlang_functions = (
+        'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!',
+        'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free',
+        'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor',
+        'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now',
+        'pset!', 'pref-ptr', 'vset!', 'vref', 'aset!', 'aref', 'aref-ptr',
+        'tset!', 'tref', 'tref-ptr', 'salloc', 'halloc', 'zalloc', 'alloc',
+        'schedule', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
+        'sqrt', 'expt', 'floor', 'ceiling', 'truncate', 'round',
+        'llvm_printf', 'push_zone', 'pop_zone', 'memzone', 'callback',
+        'llvm_sprintf', 'make-array', 'array-set!', 'array-ref',
+        'array-ref-ptr', 'pointer-set!', 'pointer-ref', 'pointer-ref-ptr',
+        'stack-alloc', 'heap-alloc', 'zone-alloc', 'make-tuple', 'tuple-set!',
+        'tuple-ref', 'tuple-ref-ptr', 'closure-set!', 'closure-ref', 'pref',
+        'pdref', 'impc_null', 'bitcast', 'void', 'ifret', 'ret->', 'clrun->',
+        'make-env-zone', 'make-env', '<>', 'dtof', 'ftod', 'i1tof',
+        'i1tod', 'i1toi8', 'i1toi32', 'i1toi64', 'i8tof', 'i8tod',
+        'i8toi1', 'i8toi32', 'i8toi64', 'i32tof', 'i32tod', 'i32toi1',
+        'i32toi8', 'i32toi64', 'i64tof', 'i64tod', 'i64toi1',
+        'i64toi8', 'i64toi32',
+    )
+
+    # valid names for Scheme identifiers (names cannot consist fully
+    # of numbers, but this should be good enough for now)
+    valid_scheme_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
+
+    # valid characters in xtlang names & types
+    valid_xtlang_name = r'[\w.!-]+'
+    valid_xtlang_type = r'[]{}[\w<>,*/|!-]+'
+
+    tokens = {
+        # keep track of when we're exiting the xtlang form
+        'xtlang': [
+            (r'\(', Punctuation, '#push'),
+            (r'\)', Punctuation, '#pop'),
+
+            # name being defined, directly after the bind-* keyword
+            (r'(?<=bind-func\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-val\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-type\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-alias\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-poly\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-lib\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-dylib\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-lib-func\s)' + valid_xtlang_name, Name.Function),
+            (r'(?<=bind-lib-val\s)' + valid_xtlang_name, Name.Function),
+
+            # type annotations
+            (r':' + valid_xtlang_type, Keyword.Type),
+
+            # types
+            (r'(<' + valid_xtlang_type + r'>|\|' + valid_xtlang_type + r'\||/' +
+             valid_xtlang_type + r'/|' + valid_xtlang_type + r'\*)\**',
+             Keyword.Type),
+
+            # keywords
+            (words(xtlang_keywords, prefix=r'(?<=\()'), Keyword),
+
+            # builtins
+            (words(xtlang_functions, prefix=r'(?<=\()'), Name.Function),
+
+            include('common'),
+
+            # variables
+            (valid_xtlang_name, Name.Variable),
+        ],
+        'scheme': [
+            # quoted symbols
+            (r"'" + valid_scheme_name, String.Symbol),
+
+            # char literals
+            (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
+
+            # special operators
+            (r"('|#|`|,@|,|\.)", Operator),
+
+            # keywords
+            (words(scheme_keywords, prefix=r'(?<=\()'), Keyword),
+
+            # builtins
+            (words(scheme_functions, prefix=r'(?<=\()'), Name.Function),
+
+            include('common'),
+
+            # variables
+            (valid_scheme_name, Name.Variable),
+        ],
+        # common to both xtlang and Scheme
+        'common': [
+            # comments
+            (r';.*$', Comment.Single),
+
+            # whitespaces - usually not relevant
+            (r'\s+', Whitespace),
+
+            # numbers
+            (r'-?\d+\.\d+', Number.Float),
+            (r'-?\d+', Number.Integer),
+
+            # binary/oct/hex literals
+            (r'(#b|#o|#x)[\d.]+', Number),
+
+            # strings
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+
+            # true/false constants
+            (r'(#t|#f)', Name.Constant),
+
+            # keywords
+            (words(common_keywords, prefix=r'(?<=\()'), Keyword),
+
+            # builtins
+            (words(common_functions, prefix=r'(?<=\()'), Name.Function),
+
+            # the famous parentheses!
+            (r'(\(|\))', Punctuation),
+        ],
+        'root': [
+            # go into xtlang mode
+            (words(xtlang_bind_keywords, prefix=r'(?<=\()', suffix=r'\b'),
+             Keyword, 'xtlang'),
+
+            include('scheme')
+        ],
+    }
+
+
+class FennelLexer(RegexLexer):
+    """A lexer for the Fennel programming language.
+
+    Fennel compiles to Lua, so all the Lua builtins are recognized as well
+    as the special forms that are particular to the Fennel compiler.
+
+    .. versionadded:: 2.3
+    """
+    name = 'Fennel'
+    url = 'https://fennel-lang.org'
+    aliases = ['fennel', 'fnl']
+    filenames = ['*.fnl']
+
+    # this list is current as of Fennel version 0.10.0.
+    special_forms = (
+        '#', '%', '*', '+', '-', '->', '->>', '-?>', '-?>>', '.', '..',
+        '/', '//', ':', '<', '<=', '=', '>', '>=', '?.', '^', 'accumulate',
+        'and', 'band', 'bnot', 'bor', 'bxor', 'collect', 'comment', 'do', 'doc',
+        'doto', 'each', 'eval-compiler', 'for', 'hashfn', 'icollect', 'if',
+        'import-macros', 'include', 'length', 'let', 'lshift', 'lua',
+        'macrodebug', 'match', 'not', 'not=', 'or', 'partial', 'pick-args',
+        'pick-values', 'quote', 'require-macros', 'rshift', 'set',
+        'set-forcibly!', 'tset', 'values', 'when', 'while', 'with-open', '~='
+    )
+
+    # Binding/definition forms, highlighted as Keyword.Declaration.
+    declarations = (
+        'fn', 'global', 'lambda', 'local', 'macro', 'macros', 'var', 'λ'
+    )
+
+    # Lua standard library names.
+    builtins = (
+        '_G', '_VERSION', 'arg', 'assert', 'bit32', 'collectgarbage',
+        'coroutine', 'debug', 'dofile', 'error', 'getfenv',
+        'getmetatable', 'io', 'ipairs', 'load', 'loadfile', 'loadstring',
+        'math', 'next', 'os', 'package', 'pairs', 'pcall', 'print',
+        'rawequal', 'rawget', 'rawlen', 'rawset', 'require', 'select',
+        'setfenv', 'setmetatable', 'string', 'table', 'tonumber',
+        'tostring', 'type', 'unpack', 'xpcall'
+    )
+
+    # based on the scheme definition, but disallowing leading digits and
+    # commas, and @ is not allowed.
+    valid_name = r'[a-zA-Z_!$%&*+/:<=>?^~|-][\w!$%&*+/:<=>?^~|\.-]*'
+
+    tokens = {
+        'root': [
+            # the only comment form is a semicolon; goes to the end of the line
+            (r';.*$', Comment.Single),
+
+            (r',+', Text),
+            (r'\s+', Whitespace),
+            (r'-?\d+\.\d+', Number.Float),
+            (r'-?\d+', Number.Integer),
+
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+
+            (r'(true|false|nil)', Name.Constant),
+
+            # these are technically strings, but it's worth visually
+            # distinguishing them because their intent is different
+            # from regular strings.
+            (r':' + valid_name, String.Symbol),
+
+            # special forms are keywords
+            # NOTE(review): suffix=' ' means these only match when followed
+            # by a literal space; a form ending at end-of-line falls through
+            # to Name.Variable — confirm this is intended.
+            (words(special_forms, suffix=' '), Keyword),
+            # these are ... even more special!
+            (words(declarations, suffix=' '), Keyword.Declaration),
+            # lua standard library are builtins
+            (words(builtins, suffix=' '), Name.Builtin),
+            # special-case the vararg symbol
+            (r'\.\.\.', Name.Variable),
+            # regular identifiers
+            (valid_name, Name.Variable),
+
+            # all your normal paired delimiters for your programming enjoyment
+            (r'(\(|\))', Punctuation),
+            (r'(\[|\])', Punctuation),
+            (r'(\{|\})', Punctuation),
+
+            # the # symbol is shorthand for a lambda function
+            (r'#', Punctuation),
+        ]
+    }
diff --git a/pygments/lexers/macaulay2.py b/pygments/lexers/macaulay2.py
new file mode 100644
index 0000000..aaf750b
--- /dev/null
+++ b/pygments/lexers/macaulay2.py
@@ -0,0 +1,1739 @@
+"""
+ pygments.lexers.macaulay2
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Macaulay2.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Keyword, Name, String, Text
+
+__all__ = ['Macaulay2Lexer']
+
+# Auto-generated for Macaulay2-1.21. Do not modify this file manually.
+
+# Reserved words of the Macaulay2 language (auto-generated for
+# Macaulay2-1.21; see the header note — do not edit by hand).
+M2KEYWORDS = (
+    "and",
+    "break",
+    "catch",
+    "continue",
+    "do",
+    "elapsedTime",
+    "elapsedTiming",
+    "else",
+    "for",
+    "from",
+    "global",
+    "if",
+    "in",
+    "list",
+    "local",
+    "new",
+    "not",
+    "of",
+    "or",
+    "return",
+    "shield",
+    "SPACE",
+    "step",
+    "symbol",
+    "then",
+    "threadVariable",
+    "throw",
+    "time",
+    "timing",
+    "to",
+    "try",
+    "when",
+    "while",
+    "xor"
+    )
+
+# Names of built-in Macaulay2 datatypes, including the hypertext/markup
+# types (auto-generated for Macaulay2-1.21 — do not edit by hand).
+M2DATATYPES = (
+    "Adjacent",
+    "AffineVariety",
+    "Analyzer",
+    "ANCHOR",
+    "AngleBarList",
+    "Array",
+    "AssociativeExpression",
+    "Bag",
+    "BasicList",
+    "BettiTally",
+    "BinaryOperation",
+    "BLOCKQUOTE",
+    "BODY",
+    "BOLD",
+    "Boolean",
+    "BR",
+    "CacheFunction",
+    "CacheTable",
+    "CC",
+    "CDATA",
+    "ChainComplex",
+    "ChainComplexMap",
+    "CODE",
+    "CoherentSheaf",
+    "Command",
+    "COMMENT",
+    "CompiledFunction",
+    "CompiledFunctionBody",
+    "CompiledFunctionClosure",
+    "ComplexField",
+    "Constant",
+    "Database",
+    "DD",
+    "Descent",
+    "Describe",
+    "Dictionary",
+    "DIV",
+    "Divide",
+    "DL",
+    "DocumentTag",
+    "DT",
+    "Eliminate",
+    "EM",
+    "EngineRing",
+    "Equation",
+    "ExampleItem",
+    "Expression",
+    "File",
+    "FilePosition",
+    "FractionField",
+    "Function",
+    "FunctionApplication",
+    "FunctionBody",
+    "FunctionClosure",
+    "GaloisField",
+    "GeneralOrderedMonoid",
+    "GlobalDictionary",
+    "GradedModule",
+    "GradedModuleMap",
+    "GroebnerBasis",
+    "GroebnerBasisOptions",
+    "HashTable",
+    "HEAD",
+    "HEADER1",
+    "HEADER2",
+    "HEADER3",
+    "HEADER4",
+    "HEADER5",
+    "HEADER6",
+    "HeaderType",
+    "Holder",
+    "HR",
+    "HREF",
+    "HTML",
+    "Hybrid",
+    "Hypertext",
+    "HypertextContainer",
+    "HypertextParagraph",
+    "Ideal",
+    "IMG",
+    "ImmutableType",
+    "IndeterminateNumber",
+    "IndexedVariable",
+    "IndexedVariableTable",
+    "InexactField",
+    "InexactFieldFamily",
+    "InexactNumber",
+    "InfiniteNumber",
+    "IntermediateMarkUpType",
+    "ITALIC",
+    "Iterator",
+    "Keyword",
+    "LABEL",
+    "LATER",
+    "LI",
+    "LINK",
+    "List",
+    "LITERAL",
+    "LocalDictionary",
+    "LowerBound",
+    "Manipulator",
+    "MapExpression",
+    "MarkUpType",
+    "Matrix",
+    "MatrixExpression",
+    "MENU",
+    "META",
+    "MethodFunction",
+    "MethodFunctionBinary",
+    "MethodFunctionSingle",
+    "MethodFunctionWithOptions",
+    "Minus",
+    "Module",
+    "Monoid",
+    "MonoidElement",
+    "MonomialIdeal",
+    "MultigradedBettiTally",
+    "MutableHashTable",
+    "MutableList",
+    "MutableMatrix",
+    "Net",
+    "NetFile",
+    "NonAssociativeProduct",
+    "Nothing",
+    "Number",
+    "NumberedVerticalList",
+    "OL",
+    "OneExpression",
+    "Option",
+    "OptionTable",
+    "OrderedMonoid",
+    "Package",
+    "PARA",
+    "Parenthesize",
+    "Parser",
+    "Partition",
+    "PolynomialRing",
+    "Power",
+    "PRE",
+    "Product",
+    "ProductOrder",
+    "Program",
+    "ProgramRun",
+    "ProjectiveHilbertPolynomial",
+    "ProjectiveVariety",
+    "Pseudocode",
+    "QQ",
+    "QuotientRing",
+    "RealField",
+    "Resolution",
+    "Ring",
+    "RingElement",
+    "RingFamily",
+    "RingMap",
+    "RowExpression",
+    "RR",
+    "RRi",
+    "SCRIPT",
+    "ScriptedFunctor",
+    "SelfInitializingType",
+    "Sequence",
+    "Set",
+    "SheafExpression",
+    "SheafOfRings",
+    "SMALL",
+    "SPAN",
+    "SparseMonomialVectorExpression",
+    "SparseVectorExpression",
+    "String",
+    "STRONG",
+    "STYLE",
+    "SUB",
+    "Subscript",
+    "SUBSECTION",
+    "Sum",
+    "SumOfTwists",
+    "SUP",
+    "Superscript",
+    "Symbol",
+    "SymbolBody",
+    "TABLE",
+    "Table",
+    "Tally",
+    "Task",
+    "TD",
+    "TestInput",
+    "TEX",
+    "TH",
+    "Thing",
+    "Time",
+    "TITLE",
+    "TO",
+    "TO2",
+    "TOH",
+    "TR",
+    "TT",
+    "Type",
+    "UL",
+    "URL",
+    "Variety",
+    "Vector",
+    "VectorExpression",
+    "VerticalList",
+    "VirtualTally",
+    "VisibleList",
+    "WrapperType",
+    "ZeroExpression",
+    "ZZ"
+    )
+
+M2FUNCTIONS = (
+ "about",
+ "abs",
+ "accumulate",
+ "acos",
+ "acosh",
+ "acot",
+ "acoth",
+ "addCancelTask",
+ "addDependencyTask",
+ "addEndFunction",
+ "addHook",
+ "addStartFunction",
+ "addStartTask",
+ "adjoint",
+ "agm",
+ "alarm",
+ "all",
+ "ambient",
+ "analyticSpread",
+ "ancestor",
+ "ancestors",
+ "andP",
+ "ann",
+ "annihilator",
+ "antipode",
+ "any",
+ "append",
+ "applicationDirectory",
+ "apply",
+ "applyKeys",
+ "applyPairs",
+ "applyTable",
+ "applyValues",
+ "apropos",
+ "arXiv",
+ "ascii",
+ "asin",
+ "asinh",
+ "ass",
+ "assert",
+ "associatedGradedRing",
+ "associatedPrimes",
+ "atan",
+ "atan2",
+ "atanh",
+ "atEndOfFile",
+ "autoload",
+ "baseFilename",
+ "baseName",
+ "baseRing",
+ "basis",
+ "beginDocumentation",
+ "benchmark",
+ "BesselJ",
+ "BesselY",
+ "Beta",
+ "betti",
+ "between",
+ "binomial",
+ "borel",
+ "cacheValue",
+ "cancelTask",
+ "capture",
+ "ceiling",
+ "centerString",
+ "chainComplex",
+ "changeBase",
+ "char",
+ "characters",
+ "charAnalyzer",
+ "check",
+ "checkDegrees",
+ "chi",
+ "class",
+ "clean",
+ "clearEcho",
+ "code",
+ "codim",
+ "coefficient",
+ "coefficientRing",
+ "coefficients",
+ "cohomology",
+ "coimage",
+ "coker",
+ "cokernel",
+ "collectGarbage",
+ "columnAdd",
+ "columnate",
+ "columnMult",
+ "columnPermute",
+ "columnRankProfile",
+ "columnSwap",
+ "combine",
+ "commandInterpreter",
+ "commonest",
+ "commonRing",
+ "comodule",
+ "complement",
+ "complete",
+ "components",
+ "compose",
+ "compositions",
+ "compress",
+ "concatenate",
+ "conductor",
+ "cone",
+ "conjugate",
+ "connectionCount",
+ "constParser",
+ "content",
+ "contract",
+ "conwayPolynomial",
+ "copy",
+ "copyDirectory",
+ "copyFile",
+ "cos",
+ "cosh",
+ "cot",
+ "cotangentSheaf",
+ "coth",
+ "cover",
+ "coverMap",
+ "cpuTime",
+ "createTask",
+ "csc",
+ "csch",
+ "currentColumnNumber",
+ "currentDirectory",
+ "currentPosition",
+ "currentRowNumber",
+ "currentTime",
+ "deadParser",
+ "debug",
+ "debugError",
+ "decompose",
+ "deepSplice",
+ "default",
+ "degree",
+ "degreeGroup",
+ "degreeLength",
+ "degrees",
+ "degreesMonoid",
+ "degreesRing",
+ "delete",
+ "demark",
+ "denominator",
+ "depth",
+ "describe",
+ "det",
+ "determinant",
+ "diagonalMatrix",
+ "diameter",
+ "dictionary",
+ "diff",
+ "difference",
+ "Digamma",
+ "dim",
+ "directSum",
+ "disassemble",
+ "discriminant",
+ "dismiss",
+ "distinguished",
+ "divideByVariable",
+ "doc",
+ "document",
+ "drop",
+ "dual",
+ "eagonNorthcott",
+ "echoOff",
+ "echoOn",
+ "eigenvalues",
+ "eigenvectors",
+ "eint",
+ "elements",
+ "eliminate",
+ "End",
+ "endPackage",
+ "entries",
+ "erase",
+ "erf",
+ "erfc",
+ "error",
+ "euler",
+ "eulers",
+ "even",
+ "EXAMPLE",
+ "examples",
+ "exec",
+ "exp",
+ "expectedReesIdeal",
+ "expm1",
+ "exponents",
+ "export",
+ "exportFrom",
+ "exportMutable",
+ "expression",
+ "extend",
+ "exteriorPower",
+ "factor",
+ "Fano",
+ "fileExecutable",
+ "fileExists",
+ "fileLength",
+ "fileMode",
+ "fileReadable",
+ "fileTime",
+ "fileWritable",
+ "fillMatrix",
+ "findFiles",
+ "findHeft",
+ "findProgram",
+ "findSynonyms",
+ "first",
+ "firstkey",
+ "fittingIdeal",
+ "flagLookup",
+ "flatten",
+ "flattenRing",
+ "flip",
+ "floor",
+ "fold",
+ "forceGB",
+ "fork",
+ "format",
+ "formation",
+ "frac",
+ "fraction",
+ "frames",
+ "fromDividedPowers",
+ "fromDual",
+ "functionBody",
+ "futureParser",
+ "Gamma",
+ "gb",
+ "gbRemove",
+ "gbSnapshot",
+ "gcd",
+ "gcdCoefficients",
+ "gcdLLL",
+ "GCstats",
+ "genera",
+ "generateAssertions",
+ "generator",
+ "generators",
+ "genericMatrix",
+ "genericSkewMatrix",
+ "genericSymmetricMatrix",
+ "gens",
+ "genus",
+ "get",
+ "getc",
+ "getChangeMatrix",
+ "getenv",
+ "getGlobalSymbol",
+ "getNetFile",
+ "getNonUnit",
+ "getPrimeWithRootOfUnity",
+ "getSymbol",
+ "getWWW",
+ "GF",
+ "globalAssign",
+ "globalAssignFunction",
+ "globalAssignment",
+ "globalReleaseFunction",
+ "gradedModule",
+ "gradedModuleMap",
+ "gramm",
+ "graphIdeal",
+ "graphRing",
+ "Grassmannian",
+ "groebnerBasis",
+ "groupID",
+ "hash",
+ "hashTable",
+ "heft",
+ "height",
+ "hermite",
+ "hilbertFunction",
+ "hilbertPolynomial",
+ "hilbertSeries",
+ "hold",
+ "Hom",
+ "homogenize",
+ "homology",
+ "homomorphism",
+ "hooks",
+ "horizontalJoin",
+ "html",
+ "httpHeaders",
+ "hypertext",
+ "icFracP",
+ "icFractions",
+ "icMap",
+ "icPIdeal",
+ "ideal",
+ "idealizer",
+ "identity",
+ "image",
+ "imaginaryPart",
+ "importFrom",
+ "independentSets",
+ "index",
+ "indices",
+ "inducedMap",
+ "inducesWellDefinedMap",
+ "info",
+ "input",
+ "insert",
+ "installAssignmentMethod",
+ "installedPackages",
+ "installHilbertFunction",
+ "installMethod",
+ "installMinprimes",
+ "installPackage",
+ "instance",
+ "instances",
+ "integralClosure",
+ "integrate",
+ "intersect",
+ "intersectInP",
+ "intersection",
+ "interval",
+ "inverse",
+ "inverseErf",
+ "inversePermutation",
+ "inverseRegularizedBeta",
+ "inverseRegularizedGamma",
+ "inverseSystem",
+ "irreducibleCharacteristicSeries",
+ "irreducibleDecomposition",
+ "isAffineRing",
+ "isANumber",
+ "isBorel",
+ "isc",
+ "isCanceled",
+ "isCommutative",
+ "isConstant",
+ "isDirectory",
+ "isDirectSum",
+ "isEmpty",
+ "isField",
+ "isFinite",
+ "isFinitePrimeField",
+ "isFreeModule",
+ "isGlobalSymbol",
+ "isHomogeneous",
+ "isIdeal",
+ "isInfinite",
+ "isInjective",
+ "isInputFile",
+ "isIsomorphic",
+ "isIsomorphism",
+ "isLinearType",
+ "isListener",
+ "isLLL",
+ "isMember",
+ "isModule",
+ "isMonomialIdeal",
+ "isNormal",
+ "isOpen",
+ "isOutputFile",
+ "isPolynomialRing",
+ "isPrimary",
+ "isPrime",
+ "isPrimitive",
+ "isPseudoprime",
+ "isQuotientModule",
+ "isQuotientOf",
+ "isQuotientRing",
+ "isReady",
+ "isReal",
+ "isReduction",
+ "isRegularFile",
+ "isRing",
+ "isSkewCommutative",
+ "isSorted",
+ "isSquareFree",
+ "isStandardGradedPolynomialRing",
+ "isSubmodule",
+ "isSubquotient",
+ "isSubset",
+ "isSupportedInZeroLocus",
+ "isSurjective",
+ "isTable",
+ "isUnit",
+ "isWellDefined",
+ "isWeylAlgebra",
+ "iterator",
+ "jacobian",
+ "jacobianDual",
+ "join",
+ "ker",
+ "kernel",
+ "kernelLLL",
+ "kernelOfLocalization",
+ "keys",
+ "kill",
+ "koszul",
+ "last",
+ "lcm",
+ "leadCoefficient",
+ "leadComponent",
+ "leadMonomial",
+ "leadTerm",
+ "left",
+ "length",
+ "letterParser",
+ "lift",
+ "liftable",
+ "limitFiles",
+ "limitProcesses",
+ "lines",
+ "linkFile",
+ "listForm",
+ "listSymbols",
+ "LLL",
+ "lngamma",
+ "load",
+ "loadPackage",
+ "localDictionaries",
+ "localize",
+ "locate",
+ "log",
+ "log1p",
+ "lookup",
+ "lookupCount",
+ "LUdecomposition",
+ "M2CODE",
+ "makeDirectory",
+ "makeDocumentTag",
+ "makePackageIndex",
+ "makeS2",
+ "map",
+ "markedGB",
+ "match",
+ "mathML",
+ "matrix",
+ "max",
+ "maxPosition",
+ "member",
+ "memoize",
+ "memoizeClear",
+ "memoizeValues",
+ "merge",
+ "mergePairs",
+ "method",
+ "methodOptions",
+ "methods",
+ "midpoint",
+ "min",
+ "mingens",
+ "mingle",
+ "minimalBetti",
+ "minimalPresentation",
+ "minimalPrimes",
+ "minimalReduction",
+ "minimizeFilename",
+ "minors",
+ "minPosition",
+ "minPres",
+ "minprimes",
+ "minus",
+ "mkdir",
+ "mod",
+ "module",
+ "modulo",
+ "monoid",
+ "monomialCurveIdeal",
+ "monomialIdeal",
+ "monomials",
+ "monomialSubideal",
+ "moveFile",
+ "multidegree",
+ "multidoc",
+ "multigraded",
+ "multiplicity",
+ "mutable",
+ "mutableIdentity",
+ "mutableMatrix",
+ "nanosleep",
+ "needs",
+ "needsPackage",
+ "net",
+ "netList",
+ "newClass",
+ "newCoordinateSystem",
+ "newNetFile",
+ "newPackage",
+ "newRing",
+ "next",
+ "nextkey",
+ "nextPrime",
+ "NNParser",
+ "nonspaceAnalyzer",
+ "norm",
+ "normalCone",
+ "notImplemented",
+ "nullhomotopy",
+ "nullParser",
+ "nullSpace",
+ "number",
+ "numcols",
+ "numColumns",
+ "numerator",
+ "numeric",
+ "numericInterval",
+ "numgens",
+ "numRows",
+ "numrows",
+ "odd",
+ "oeis",
+ "ofClass",
+ "on",
+ "openDatabase",
+ "openDatabaseOut",
+ "openFiles",
+ "openIn",
+ "openInOut",
+ "openListener",
+ "openOut",
+ "openOutAppend",
+ "optionalSignParser",
+ "options",
+ "optP",
+ "orP",
+ "override",
+ "pack",
+ "package",
+ "packageTemplate",
+ "pad",
+ "pager",
+ "pairs",
+ "parent",
+ "part",
+ "partition",
+ "partitions",
+ "parts",
+ "pdim",
+ "peek",
+ "permanents",
+ "permutations",
+ "pfaffians",
+ "pivots",
+ "plus",
+ "poincare",
+ "poincareN",
+ "polarize",
+ "poly",
+ "position",
+ "positions",
+ "power",
+ "powermod",
+ "precision",
+ "preimage",
+ "prepend",
+ "presentation",
+ "pretty",
+ "primaryComponent",
+ "primaryDecomposition",
+ "print",
+ "printerr",
+ "printString",
+ "processID",
+ "product",
+ "profile",
+ "Proj",
+ "projectiveHilbertPolynomial",
+ "promote",
+ "protect",
+ "prune",
+ "pseudocode",
+ "pseudoRemainder",
+ "pushForward",
+ "QQParser",
+ "QRDecomposition",
+ "quotient",
+ "quotientRemainder",
+ "radical",
+ "radicalContainment",
+ "random",
+ "randomKRationalPoint",
+ "randomMutableMatrix",
+ "rank",
+ "read",
+ "readDirectory",
+ "readlink",
+ "readPackage",
+ "realPart",
+ "realpath",
+ "recursionDepth",
+ "reducedRowEchelonForm",
+ "reduceHilbert",
+ "reductionNumber",
+ "reesAlgebra",
+ "reesAlgebraIdeal",
+ "reesIdeal",
+ "regex",
+ "regexQuote",
+ "registerFinalizer",
+ "regSeqInIdeal",
+ "regularity",
+ "regularizedBeta",
+ "regularizedGamma",
+ "relations",
+ "relativizeFilename",
+ "remainder",
+ "remove",
+ "removeDirectory",
+ "removeFile",
+ "removeLowestDimension",
+ "reorganize",
+ "replace",
+ "res",
+ "reshape",
+ "resolution",
+ "resultant",
+ "reverse",
+ "right",
+ "ring",
+ "ringFromFractions",
+ "roots",
+ "rotate",
+ "round",
+ "rowAdd",
+ "rowMult",
+ "rowPermute",
+ "rowRankProfile",
+ "rowSwap",
+ "rsort",
+ "run",
+ "runHooks",
+ "runLengthEncode",
+ "runProgram",
+ "same",
+ "saturate",
+ "scan",
+ "scanKeys",
+ "scanLines",
+ "scanPairs",
+ "scanValues",
+ "schedule",
+ "schreyerOrder",
+ "Schubert",
+ "searchPath",
+ "sec",
+ "sech",
+ "seeParsing",
+ "select",
+ "selectInSubring",
+ "selectVariables",
+ "separate",
+ "separateRegexp",
+ "sequence",
+ "serialNumber",
+ "set",
+ "setEcho",
+ "setGroupID",
+ "setIOExclusive",
+ "setIOSynchronized",
+ "setIOUnSynchronized",
+ "setRandomSeed",
+ "setup",
+ "setupEmacs",
+ "sheaf",
+ "sheafHom",
+ "show",
+ "showHtml",
+ "showTex",
+ "simpleDocFrob",
+ "sin",
+ "singularLocus",
+ "sinh",
+ "size",
+ "size2",
+ "sleep",
+ "smithNormalForm",
+ "solve",
+ "someTerms",
+ "sort",
+ "sortColumns",
+ "source",
+ "span",
+ "Spec",
+ "specialFiber",
+ "specialFiberIdeal",
+ "splice",
+ "splitWWW",
+ "sqrt",
+ "stack",
+ "stacksProject",
+ "standardForm",
+ "standardPairs",
+ "stashValue",
+ "status",
+ "style",
+ "sub",
+ "sublists",
+ "submatrix",
+ "submatrixByDegrees",
+ "subquotient",
+ "subsets",
+ "substitute",
+ "substring",
+ "subtable",
+ "sum",
+ "super",
+ "support",
+ "SVD",
+ "switch",
+ "sylvesterMatrix",
+ "symbolBody",
+ "symlinkDirectory",
+ "symlinkFile",
+ "symmetricAlgebra",
+ "symmetricAlgebraIdeal",
+ "symmetricKernel",
+ "symmetricPower",
+ "synonym",
+ "SYNOPSIS",
+ "syz",
+ "syzygyScheme",
+ "table",
+ "take",
+ "tally",
+ "tan",
+ "tangentCone",
+ "tangentSheaf",
+ "tanh",
+ "target",
+ "taskResult",
+ "temporaryFileName",
+ "tensor",
+ "tensorAssociativity",
+ "terminalParser",
+ "terms",
+ "TEST",
+ "testHunekeQuestion",
+ "tests",
+ "tex",
+ "texMath",
+ "times",
+ "toAbsolutePath",
+ "toCC",
+ "toDividedPowers",
+ "toDual",
+ "toExternalString",
+ "toField",
+ "toList",
+ "toLower",
+ "top",
+ "topCoefficients",
+ "topComponents",
+ "toRR",
+ "toRRi",
+ "toSequence",
+ "toString",
+ "toUpper",
+ "trace",
+ "transpose",
+ "trim",
+ "truncate",
+ "truncateOutput",
+ "tutorial",
+ "ultimate",
+ "unbag",
+ "uncurry",
+ "undocumented",
+ "uniform",
+ "uninstallAllPackages",
+ "uninstallPackage",
+ "unique",
+ "uniquePermutations",
+ "unsequence",
+ "unstack",
+ "urlEncode",
+ "use",
+ "userSymbols",
+ "utf8",
+ "utf8check",
+ "utf8substring",
+ "validate",
+ "value",
+ "values",
+ "variety",
+ "vars",
+ "vector",
+ "versalEmbedding",
+ "wait",
+ "wedgeProduct",
+ "weightRange",
+ "whichGm",
+ "width",
+ "wikipedia",
+ "wrap",
+ "youngest",
+ "zero",
+ "zeta",
+ "ZZParser"
+ )
+
# Names treated as constants by the Macaulay2 lexer: built-in symbols,
# option names, and distributed package names.  Consumed by
# Macaulay2Lexer's 'root' state, where each entry is highlighted as
# Name.Constant.  The tuple is (mostly) case-insensitively sorted;
# keep it that way when adding entries.
M2CONSTANTS = (
    "AbstractToricVarieties",
    "Acknowledgement",
    "AdditionalPaths",
    "AdjointIdeal",
    "AfterEval",
    "AfterNoPrint",
    "AfterPrint",
    "AInfinity",
    "AlgebraicSplines",
    "Algorithm",
    "Alignment",
    "AllCodimensions",
    "allowableThreads",
    "AnalyzeSheafOnP1",
    "applicationDirectorySuffix",
    "argument",
    "Ascending",
    "AssociativeAlgebras",
    "Authors",
    "AuxiliaryFiles",
    "backtrace",
    "Bareiss",
    "BaseFunction",
    "baseRings",
    "BaseRow",
    "BasisElementLimit",
    "Bayer",
    "BeforePrint",
    "BeginningMacaulay2",
    "Benchmark",
    "Bertini",
    "BettiCharacters",
    "BGG",
    "BIBasis",
    "Binary",
    "Binomial",
    "BinomialEdgeIdeals",
    "Binomials",
    "BKZ",
    "blockMatrixForm",
    "Body",
    "BoijSoederberg",
    "Book3264Examples",
    "BooleanGB",
    "Boxes",
    "Browse",
    "Bruns",
    "cache",
    "CacheExampleOutput",
    "CallLimit",
    "CannedExample",
    "CatalanConstant",
    "Caveat",
    "Center",
    "Certification",
    "ChainComplexExtras",
    "ChainComplexOperations",
    "ChangeMatrix",
    "CharacteristicClasses",
    "CheckDocumentation",
    "Chordal",
    "Classic",
    "clearAll",
    "clearOutput",
    "close",
    "closeIn",
    "closeOut",
    "ClosestFit",
    "Code",
    "CodimensionLimit",
    "CodingTheory",
    "CoefficientRing",
    "Cofactor",
    "CohenEngine",
    "CohenTopLevel",
    "CohomCalg",
    "CoincidentRootLoci",
    "commandLine",
    "compactMatrixForm",
    "Complement",
    "CompleteIntersection",
    "CompleteIntersectionResolutions",
    "Complexes",
    "ConductorElement",
    "Configuration",
    "ConformalBlocks",
    "Consequences",
    "Constants",
    "Contributors",
    "ConvexInterface",
    "ConwayPolynomials",
    "copyright",
    "Core",
    "CorrespondenceScrolls",
    "CotangentSchubert",
    "Cremona",
    "currentFileDirectory",
    "currentFileName",
    "currentLayout",
    "currentPackage",
    "Cyclotomic",
    "Date",
    "dd",
    "DebuggingMode",
    "debuggingMode",
    "debugLevel",
    "DecomposableSparseSystems",
    "Decompose",
    "Default",
    "defaultPrecision",
    "Degree",
    "DegreeGroup",
    "DegreeLift",
    "DegreeLimit",
    "DegreeMap",
    "DegreeOrder",
    "DegreeRank",
    "Degrees",
    "Dense",
    "Density",
    "Depth",
    "Descending",
    "Description",
    "DeterminantalRepresentations",
    "DGAlgebras",
    "dictionaryPath",
    "DiffAlg",
    "Dispatch",
    "DivideConquer",
    "DividedPowers",
    "Divisor",
    "Dmodules",
    "docExample",
    "docTemplate",
    "Down",
    "Dynamic",
    "EagonResolution",
    "EdgeIdeals",
    "edit",
    "EigenSolver",
    "EisenbudHunekeVasconcelos",
    "Elimination",
    "EliminationMatrices",
    "EllipticCurves",
    "EllipticIntegrals",
    "Email",
    "end",
    "endl",
    "Engine",
    "engineDebugLevel",
    "EngineTests",
    "EnumerationCurves",
    "environment",
    "EquivariantGB",
    "errorDepth",
    "EulerConstant",
    "Example",
    "ExampleFiles",
    "ExampleSystems",
    "Exclude",
    "exit",
    "Ext",
    "ExteriorIdeals",
    "ExteriorModules",
    "false",
    "FastMinors",
    "FastNonminimal",
    "FGLM",
    "fileDictionaries",
    "fileExitHooks",
    "FileName",
    "FindOne",
    "FiniteFittingIdeals",
    "First",
    "FirstPackage",
    "FlatMonoid",
    "Flexible",
    "flush",
    "FollowLinks",
    "ForeignFunctions",
    "FormalGroupLaws",
    "Format",
    "FourierMotzkin",
    "FourTiTwo",
    "fpLLL",
    "FrobeniusThresholds",
    "FunctionFieldDesingularization",
    "GBDegrees",
    "gbTrace",
    "GenerateAssertions",
    "Generic",
    "GenericInitialIdeal",
    "GeometricDecomposability",
    "gfanInterface",
    "Givens",
    "GKMVarieties",
    "GLex",
    "Global",
    "GlobalAssignHook",
    "globalAssignmentHooks",
    "GlobalHookStore",
    "GlobalReleaseHook",
    "Gorenstein",
    "GradedLieAlgebras",
    "GraphicalModels",
    "GraphicalModelsMLE",
    "Graphics",
    "Graphs",
    "GRevLex",
    "GroebnerStrata",
    "GroebnerWalk",
    "GroupLex",
    "GroupRevLex",
    "GTZ",
    "Hadamard",
    "handleInterrupts",
    "HardDegreeLimit",
    "Heading",
    "Headline",
    "Heft",
    "Height",
    "help",
    "Hermite",
    "Hermitian",
    "HH",
    "hh",
    "HigherCIOperators",
    "HighestWeights",
    "Hilbert",
    "HodgeIntegrals",
    "homeDirectory",
    "HomePage",
    "Homogeneous",
    "Homogeneous2",
    "HomotopyLieAlgebra",
    "HorizontalSpace",
    "HyperplaneArrangements",
    "id",
    "IgnoreExampleErrors",
    "ii",
    "incomparable",
    "Increment",
    "indeterminate",
    "Index",
    "indexComponents",
    "infinity",
    "InfoDirSection",
    "infoHelp",
    "Inhomogeneous",
    "Inputs",
    "InstallPrefix",
    "IntegralClosure",
    "interpreterDepth",
    "Intersection",
    "InvariantRing",
    "InverseMethod",
    "Inverses",
    "InverseSystems",
    "Invertible",
    "InvolutiveBases",
    "Isomorphism",
    "Item",
    "Iterate",
    "Jacobian",
    "Jets",
    "Join",
    "JSON",
    "Jupyter",
    "K3Carpets",
    "K3Surfaces",
    "Keep",
    "KeepFiles",
    "KeepZeroes",
    "Key",
    "Keywords",
    "Kronecker",
    "KustinMiller",
    "lastMatch",
    "LatticePolytopes",
    "Layout",
    "Left",
    "LengthLimit",
    "Lex",
    "LexIdeals",
    "Licenses",
    "LieTypes",
    "Limit",
    "Linear",
    "LinearAlgebra",
    "LinearTruncations",
    "lineNumber",
    "listLocalSymbols",
    "listUserSymbols",
    "LLLBases",
    "loadDepth",
    "LoadDocumentation",
    "loadedFiles",
    "loadedPackages",
    "Local",
    "LocalRings",
    "LongPolynomial",
    "M0nbar",
    "Macaulay2Doc",
    "MakeDocumentation",
    "MakeHTML",
    "MakeInfo",
    "MakeLinks",
    "MakePDF",
    "MapleInterface",
    "Markov",
    "Matroids",
    "maxAllowableThreads",
    "maxExponent",
    "MaximalRank",
    "MaxReductionCount",
    "MCMApproximations",
    "MergeTeX",
    "minExponent",
    "MinimalGenerators",
    "MinimalMatrix",
    "minimalPresentationMap",
    "minimalPresentationMapInv",
    "MinimalPrimes",
    "Minimize",
    "MinimumVersion",
    "Miura",
    "MixedMultiplicity",
    "ModuleDeformations",
    "MonodromySolver",
    "Monomial",
    "MonomialAlgebras",
    "MonomialIntegerPrograms",
    "MonomialOrbits",
    "MonomialOrder",
    "Monomials",
    "MonomialSize",
    "MultiGradedRationalMap",
    "MultiplicitySequence",
    "MultiplierIdeals",
    "MultiplierIdealsDim2",
    "MultiprojectiveVarieties",
    "NAGtypes",
    "Name",
    "Nauty",
    "NautyGraphs",
    "NCAlgebra",
    "NCLex",
    "NewFromMethod",
    "newline",
    "NewMethod",
    "NewOfFromMethod",
    "NewOfMethod",
    "nil",
    "Node",
    "NoetherianOperators",
    "NoetherNormalization",
    "NonminimalComplexes",
    "NoPrint",
    "Normaliz",
    "NormalToricVarieties",
    "notify",
    "NTL",
    "null",
    "nullaryMethods",
    "NumericalAlgebraicGeometry",
    "NumericalCertification",
    "NumericalImplicitization",
    "NumericalLinearAlgebra",
    "NumericalSchubertCalculus",
    "NumericSolutions",
    "OldPolyhedra",
    "OldToricVectorBundles",
    "OnlineLookup",
    "OO",
    "oo",
    "ooo",
    "oooo",
    "OpenMath",
    "operatorAttributes",
    "OptionalComponentsPresent",
    "Options",
    "Order",
    "order",
    "OutputDictionary",
    "Outputs",
    "PackageCitations",
    "PackageDictionary",
    "PackageExports",
    "PackageImports",
    "PackageTemplate",
    "PairLimit",
    "PairsRemaining",
    "Parametrization",
    "Parsing",
    "path",
    "PencilsOfQuadrics",
    "Permanents",
    "PHCpack",
    "PhylogeneticTrees",
    "pi",
    "PieriMaps",
    "PlaneCurveSingularities",
    "Points",
    "Polyhedra",
    "Polymake",
    "Posets",
    "Position",
    "PositivityToricBundles",
    "POSIX",
    "Postfix",
    "Pre",
    "Precision",
    "Prefix",
    "prefixDirectory",
    "prefixPath",
    "PrimaryDecomposition",
    "PrimaryTag",
    "PrimitiveElement",
    "Print",
    "printingAccuracy",
    "printingLeadLimit",
    "printingPrecision",
    "printingSeparator",
    "printingTimeLimit",
    "printingTrailLimit",
    "printWidth",
    "Probability",
    "profileSummary",
    "programPaths",
    "Projective",
    "Prune",
    "PruneComplex",
    "pruningMap",
    "PseudomonomialPrimaryDecomposition",
    "Pullback",
    "PushForward",
    "Python",
    "QthPower",
    "Quasidegrees",
    "QuaternaryQuartics",
    "QuillenSuslin",
    "quit",
    "Quotient",
    "Radical",
    "RadicalCodim1",
    "RaiseError",
    "RandomCanonicalCurves",
    "RandomComplexes",
    "RandomCurves",
    "RandomCurvesOverVerySmallFiniteFields",
    "RandomGenus14Curves",
    "RandomIdeals",
    "RandomMonomialIdeals",
    "RandomObjects",
    "RandomPlaneCurves",
    "RandomPoints",
    "RandomSpaceCurves",
    "Range",
    "RationalMaps",
    "RationalPoints",
    "RationalPoints2",
    "ReactionNetworks",
    "RealFP",
    "RealQP",
    "RealQP1",
    "RealRoots",
    "RealRR",
    "RealXD",
    "recursionLimit",
    "Reduce",
    "ReesAlgebra",
    "References",
    "ReflexivePolytopesDB",
    "Regularity",
    "RelativeCanonicalResolution",
    "Reload",
    "RemakeAllDocumentation",
    "RerunExamples",
    "ResidualIntersections",
    "ResLengthThree",
    "ResolutionsOfStanleyReisnerRings",
    "restart",
    "Result",
    "Resultants",
    "returnCode",
    "Reverse",
    "RevLex",
    "Right",
    "rootPath",
    "rootURI",
    "RunDirectory",
    "RunExamples",
    "RunExternalM2",
    "Saturation",
    "Schubert2",
    "SchurComplexes",
    "SchurFunctors",
    "SchurRings",
    "scriptCommandLine",
    "SCSCP",
    "SectionRing",
    "SeeAlso",
    "SegreClasses",
    "SemidefiniteProgramming",
    "Seminormalization",
    "SeparateExec",
    "Serialization",
    "sheafExt",
    "ShimoyamaYokoyama",
    "showClassStructure",
    "showStructure",
    "showUserStructure",
    "SimpleDoc",
    "SimplicialComplexes",
    "SimplicialDecomposability",
    "SimplicialPosets",
    "SimplifyFractions",
    "SizeLimit",
    "SkewCommutative",
    "SlackIdeals",
    "SLnEquivariantMatrices",
    "SLPexpressions",
    "Sort",
    "SortStrategy",
    "SourceCode",
    "SourceRing",
    "SpaceCurves",
    "SparseResultants",
    "SpechtModule",
    "SpecialFanoFourfolds",
    "SpectralSequences",
    "SRdeformations",
    "Standard",
    "StartWithOneMinor",
    "StatePolytope",
    "StatGraphs",
    "stderr",
    "stdio",
    "StopBeforeComputation",
    "stopIfError",
    "StopIteration",
    "StopWithMinimalGenerators",
    "Strategy",
    "Strict",
    "StronglyStableIdeals",
    "Style",
    "SubalgebraBases",
    "Subnodes",
    "SubringLimit",
    "subscript",
    "Sugarless",
    "SumsOfSquares",
    "SuperLinearAlgebra",
    "superscript",
    "SVDComplexes",
    "SwitchingFields",
    "SymbolicPowers",
    "SymmetricPolynomials",
    "Synopsis",
    "Syzygies",
    "SyzygyLimit",
    "SyzygyMatrix",
    "SyzygyRows",
    "TangentCone",
    "TateOnProducts",
    "TensorComplexes",
    "Test",
    "testExample",
    "TestIdeals",
    "TeXmacs",
    "Text",
    "ThinSincereQuivers",
    "ThreadedGB",
    "Threshold",
    "Topcom",
    "topLevelMode",
    "Tor",
    "TorAlgebra",
    "Toric",
    "ToricInvariants",
    "ToricTopology",
    "ToricVectorBundles",
    "Torsion",
    "TotalPairs",
    "Tree",
    "TriangularSets",
    "Triangulations",
    "Tries",
    "Trim",
    "Triplets",
    "Tropical",
    "true",
    "Truncate",
    "Truncations",
    "TSpreadIdeals",
    "TypicalValue",
    "typicalValues",
    "Undo",
    "Unique",
    "Units",
    "Unmixed",
    "Up",
    "UpdateOnly",
    "UpperTriangular",
    "Usage",
    "UseCachedExampleOutput",
    "UseHilbertFunction",
    "UserMode",
    "UseSyzygies",
    "Variable",
    "VariableBaseName",
    "Variables",
    "Vasconcelos",
    "VectorFields",
    "VectorGraphics",
    "Verbose",
    "Verbosity",
    "Verify",
    "VersalDeformations",
    "Version",
    "version",
    "VerticalSpace",
    "viewHelp",
    "VirtualResolutions",
    "Visualize",
    "WebApp",
    "Weights",
    "WeylAlgebra",
    "WeylGroups",
    "WhitneyStratifications",
    "Wrap",
    "XML"
)
+
class Macaulay2Lexer(RegexLexer):
    """Lexer for Macaulay2, a software system for research in algebraic geometry."""

    name = 'Macaulay2'
    url = 'https://faculty.math.illinois.edu/Macaulay2/'
    aliases = ['macaulay2']
    filenames = ['*.m2']

    tokens = {
        'root': [
            # Line comment: -- to end of line.
            (r'--.*$', Comment.Single),
            # Block comment opens with -* and is closed by *- (see
            # the 'block comment' state).
            (r'-\*', Comment.Multiline, 'block comment'),
            # Ordinary double-quoted string.
            (r'"', String, 'quote string'),
            # Triple-slash delimited string: /// ... ///.
            (r'///', String, 'slash string'),
            # Word lists generated from the M2 documentation; rule order
            # here decides which category wins if a name appears in more
            # than one list.
            (words(M2KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(M2DATATYPES, prefix=r'\b', suffix=r'\b'), Name.Builtin),
            (words(M2FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Function),
            (words(M2CONSTANTS, prefix=r'\b', suffix=r'\b'), Name.Constant),
            (r'\s+', Text.Whitespace),
            # Fallback: any single character.
            (r'.', Text)
        ],
        'block comment' : [
            # Consume runs that cannot contain the *- terminator.
            (r'[^*-]+', Comment.Multiline),
            # End of block comment.
            (r'\*-', Comment.Multiline, '#pop'),
            # Lone * or - inside the comment.
            (r'[*-]', Comment.Multiline)
        ],
        'quote string' : [
            (r'[^\\"]+', String),
            (r'"', String, '#pop'),
            # Backslash escape (possibly of a quote).
            (r'\\"?', String),
        ],
        'slash string' : [
            (r'[^/]+', String),
            # An even run of slashes not followed by another slash stays
            # inside the string; an odd run of three or more terminates it.
            (r'(//)+(?!/)', String),
            (r'/(//)+(?!/)', String, '#pop'),
            (r'/', String)
        ]
    }
diff --git a/pygments/lexers/make.py b/pygments/lexers/make.py
new file mode 100644
index 0000000..f2ec101
--- /dev/null
+++ b/pygments/lexers/make.py
@@ -0,0 +1,209 @@
+"""
+ pygments.lexers.make
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Makefiles and similar.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, \
+ do_insertions, using
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Punctuation, Whitespace
+from pygments.lexers.shell import BashLexer
+
+__all__ = ['MakefileLexer', 'BaseMakefileLexer', 'CMakeLexer']
+
+
class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).

    *Rewritten in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']

    # Lines that are preprocessor-style directives rather than plain
    # makefile syntax; these are tokenized here and *not* handed to
    # BaseMakefileLexer.
    r_special = re.compile(
        r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:|vpath)|'
        # GNU Automake
        r'\s*(if|else|endif))(?=\s)')
    # Whole-line comments (optionally preceded by @ and whitespace).
    r_comment = re.compile(r'^\s*@?#')

    def get_tokens_unprocessed(self, text):
        """Split directive/comment lines from the rest; the rest is lexed
        by BaseMakefileLexer, with the extracted lines re-inserted at their
        original offsets via do_insertions()."""
        ins = []
        lines = text.splitlines(keepends=True)
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in lines:
            if self.r_special.match(line) or backslashflag:
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                # A trailing backslash continues the directive onto the
                # next line, which must then also be treated as preproc.
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                done += line
        yield from do_insertions(ins, lex.get_tokens_unprocessed(done))

    def analyse_text(text):
        # Many makefiles have $(BIG_CAPS) style variables
        if re.search(r'\$\([A-Z_]+\)', text):
            return 0.1
+
+
class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).

    Used by MakefileLexer for everything that is not a directive or a
    comment line; not registered for any filenames itself.

    .. versionadded:: 0.10
    """

    name = 'Base Makefile'
    aliases = ['basemake']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            # recipes (need to allow spaces because of expandtabs)
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            # special variables
            (r'\$[<@$+%?|*]', Keyword),
            (r'\s+', Whitespace),
            (r'#.*?\n', Comment),
            # export/unexport followed by variable names on the same line
            (r'((?:un)?export)(\s+)(?=[\w${}\t -]+\n)',
             bygroups(Keyword, Whitespace), 'export'),
            (r'(?:un)?export\s+', Keyword),
            # assignment (=, :=, ?=, +=, !=); the value, including
            # backslash continuations, is lexed as shell
            (r'([\w${}().-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
             bygroups(
                 Name.Variable, Whitespace, Operator, Whitespace,
                 using(BashLexer))),
            # strings
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(
                Name.Function, Operator, Whitespace),
             'block-header'),
            # expansions
            (r'\$\(', Keyword, 'expansion'),
        ],
        'expansion': [
            (r'[^\w$().-]+', Text),
            (r'[\w.-]+', Name.Variable),
            (r'\$', Keyword),
            # nested $(...) — parens push/pop this same state
            (r'\(', Keyword, '#push'),
            (r'\)', Keyword, '#pop'),
        ],
        'export': [
            (r'[\w${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Whitespace),
        ],
        'block-header': [
            (r'[,|]', Punctuation),
            (r'#.*?\n', Comment, '#pop'),
            (r'\\\n', Text),  # line continuation
            (r'\$\(', Keyword, 'expansion'),
            (r'[a-zA-Z_]+', Name),
            (r'\n', Whitespace, '#pop'),
            (r'.', Text),
        ],
    }
+
+
class CMakeLexer(RegexLexer):
    """
    Lexer for CMake files.

    Commands are recognized generically as ``name(...)`` invocations rather
    than from a fixed builtin list, so user-defined functions and macros
    highlight the same way as builtins.

    .. versionadded:: 1.2
    """
    name = 'CMake'
    url = 'https://cmake.org/documentation/'
    aliases = ['cmake']
    filenames = ['*.cmake', 'CMakeLists.txt']
    mimetypes = ['text/x-cmake']

    tokens = {
        'root': [
            # Any command invocation: NAME ( ... ) — arguments handled in
            # the 'args' state.  (A historical hard-coded list of builtin
            # command names was removed in favor of this generic rule.)
            (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Whitespace,
                                              Punctuation), 'args'),
            include('keywords'),
            include('ws')
        ],
        'args': [
            # Parentheses may nest inside an argument list.
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            # ${VAR}, $ENV{VAR} and $<GENERATOR_EXPRESSION> references.
            (r'(\$\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
            (r'(\$ENV\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
            (r'(\$<)(.+?)(>)', bygroups(Operator, Name.Variable, Operator)),
            # Quoted arguments may span lines, hence (?s).
            (r'(?s)".*?"', String.Double),
            (r'\\\S+', String),
            # Unquoted argument: anything up to a delimiter.
            (r'[^)$"# \t\n]+', String),
            (r'\n', Whitespace),  # explicitly legal
            include('keywords'),
            include('ws')
        ],
        'keywords': [
            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
        ],
        'ws': [
            (r'[ \t]+', Whitespace),
            (r'#.*\n', Comment),
        ]
    }

    def analyse_text(text):
        """Report high confidence (0.8) when the file contains a
        cmake_minimum_required(VERSION ...) line; 0.0 otherwise."""
        exp = (
            r'^[ \t]*CMAKE_MINIMUM_REQUIRED[ \t]*'
            r'\([ \t]*VERSION[ \t]*\d+(\.\d+)*[ \t]*'
            r'([ \t]FATAL_ERROR)?[ \t]*\)[ \t]*'
            r'(#[^\n]*)?$'
        )
        if re.search(exp, text, flags=re.MULTILINE | re.IGNORECASE):
            return 0.8
        return 0.0
diff --git a/pygments/lexers/markup.py b/pygments/lexers/markup.py
new file mode 100644
index 0000000..4fdfc1a
--- /dev/null
+++ b/pygments/lexers/markup.py
@@ -0,0 +1,765 @@
+"""
+ pygments.lexers.markup
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for non-HTML markup languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexers.html import XmlLexer
+from pygments.lexers.javascript import JavascriptLexer
+from pygments.lexers.css import CssLexer
+
+from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
+ using, this, do_insertions, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Other, Whitespace
+from pygments.util import get_bool_opt, ClassNotFound
+
+__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
+ 'MozPreprocHashLexer', 'MozPreprocPercentLexer',
+ 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
+ 'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer']
+
+
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    .. versionadded:: 0.6
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            # Anything up to the next '[' is plain text.
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
+
+
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    .. versionadded:: 0.7
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            # Processing instruction / comment line.
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)),  # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment),  # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword),  # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)),  # Link
            (r'^----+$', Keyword),  # Horizontal rules
            # Plain text runs; excludes markup-start characters.
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'\}\}\}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'\{\{\{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc),  # slurp boring text
            (r'.', Comment.Preproc),  # allow loose { or }
        ],
    }
+
+
+class RstLexer(RegexLexer):
+ """
+ For reStructuredText markup.
+
+ .. versionadded:: 0.7
+
+ Additional options accepted:
+
+ `handlecodeblocks`
+ Highlight the contents of ``.. sourcecode:: language``,
+ ``.. code:: language`` and ``.. code-block:: language``
+ directives with a lexer for the given language (default:
+ ``True``).
+
+ .. versionadded:: 0.8
+ """
+ name = 'reStructuredText'
+ url = 'https://docutils.sourceforge.io/rst.html'
+ aliases = ['restructuredtext', 'rst', 'rest']
+ filenames = ['*.rst', '*.rest']
+ mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
+ flags = re.MULTILINE
+
+ def _handle_sourcecode(self, match):
+ from pygments.lexers import get_lexer_by_name
+
+ # section header
+ yield match.start(1), Punctuation, match.group(1)
+ yield match.start(2), Text, match.group(2)
+ yield match.start(3), Operator.Word, match.group(3)
+ yield match.start(4), Punctuation, match.group(4)
+ yield match.start(5), Text, match.group(5)
+ yield match.start(6), Keyword, match.group(6)
+ yield match.start(7), Text, match.group(7)
+
+ # lookup lexer if wanted and existing
+ lexer = None
+ if self.handlecodeblocks:
+ try:
+ lexer = get_lexer_by_name(match.group(6).strip())
+ except ClassNotFound:
+ pass
+ indention = match.group(8)
+ indention_size = len(indention)
+ code = (indention + match.group(9) + match.group(10) + match.group(11))
+
+ # no lexer for this language. handle it like it was a code block
+ if lexer is None:
+ yield match.start(8), String, code
+ return
+
+ # highlight the lines with the lexer.
+ ins = []
+ codelines = code.splitlines(True)
+ code = ''
+ for line in codelines:
+ if len(line) > indention_size:
+ ins.append((len(code), [(0, Text, line[:indention_size])]))
+ code += line[indention_size:]
+ else:
+ code += line
+ yield from do_insertions(ins, lexer.get_tokens_unprocessed(code))
+
+ # from docutils.parsers.rst.states
+ closers = '\'")]}>\u2019\u201d\xbb!?'
+ unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0'
+ end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
+ % (re.escape(unicode_delimiters),
+ re.escape(closers)))
+
+ tokens = {
+ 'root': [
+ # Heading with overline
+ (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
+ r'(.+)(\n)(\1)(\n)',
+ bygroups(Generic.Heading, Text, Generic.Heading,
+ Text, Generic.Heading, Text)),
+ # Plain heading
+ (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
+ r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
+ bygroups(Generic.Heading, Text, Generic.Heading, Text)),
+ # Bulleted lists
+ (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
+ bygroups(Text, Number, using(this, state='inline'))),
+ # Numbered lists
+ (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
+ bygroups(Text, Number, using(this, state='inline'))),
+ (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
+ bygroups(Text, Number, using(this, state='inline'))),
+ # Numbered, but keep words at BOL from becoming lists
+ (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
+ bygroups(Text, Number, using(this, state='inline'))),
+ (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
+ bygroups(Text, Number, using(this, state='inline'))),
+ # Line blocks
+ (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
+ bygroups(Text, Operator, using(this, state='inline'))),
+ # Sourcecode directives
+ (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
+ r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)',
+ _handle_sourcecode),
+ # A directive
+ (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+ bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
+ using(this, state='inline'))),
+ # A reference target
+ (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
+ bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
+ # A footnote/citation target
+ (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
+ bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
+ # A substitution def
+ (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+ bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
+ Punctuation, Text, using(this, state='inline'))),
+ # Comments
+ (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
+ # Field list marker
+ (r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)',
+ bygroups(Text, Name.Class, Text)),
+ # Definition list
+ (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
+ bygroups(using(this, state='inline'), using(this, state='inline'))),
+ # Code blocks
+ (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)',
+ bygroups(String.Escape, Text, String, String, Text, String)),
+ include('inline'),
+ ],
+ 'inline': [
+ (r'\\.', Text), # escape
+ (r'``', String, 'literal'), # code
+ (r'(`.+?)(<.+?>)(`__?)', # reference with inline target
+ bygroups(String, String.Interpol, String)),
+ (r'`.+?`__?', String), # reference
+ (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
+ bygroups(Name.Variable, Name.Attribute)), # role
+ (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
+ bygroups(Name.Attribute, Name.Variable)), # role (content first)
+ (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
+ (r'\*.+?\*', Generic.Emph), # Emphasis
+ (r'\[.*?\]_', String), # Footnote or citation
+ (r'<.+?>', Name.Tag), # Hyperlink
+ (r'[^\\\n\[*`:]+', Text),
+ (r'.', Text),
+ ],
+ 'literal': [
+ (r'[^`]+', String),
+ (r'``' + end_string_suffix, String, '#pop'),
+ (r'`', String),
+ ]
+ }
+
+ def __init__(self, **options):
+ self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
+ RegexLexer.__init__(self, **options)
+
    def analyse_text(text):
        # NOTE: defined without ``self`` on purpose -- Pygments treats
        # ``analyse_text`` as a static scoring hook that returns a
        # confidence in [0, 1] (falling off the end yields None == 0).
        if text[:2] == '..' and text[2:3] != '.':
            # Starts with ".." (directive/comment/target marker) but is
            # not an ellipsis "...".
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        # Heuristic for a Setext-style title: a first line underlined by a
        # row of '-' or '=' of exactly the same length.  If there is no
        # newline at all, p1 == p2 == -1 and the 'p2 > -1' guard fails, so
        # the indexing below is safe.
        if (p2 > -1 and                     # has two lines
                p1 * 2 + 1 == p2 and        # they are the same length
                text[p1+1] in '-=' and      # the next line both starts and ends with
                text[p1+1] == text[p2-1]):  # ...a sufficiently high header
            return 0.5
+
+
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        # Rules shared between text mode and math mode.
        'general': [
            (r'%.*?\n', Comment),       # line comment (to end of line)
            (r'[{}]', Name.Builtin),    # grouping braces
            (r'[&_^]', Name.Builtin),   # alignment tab, sub-/superscript
        ],
        'root': [
            # Math-mode delimiters.  Display math is tokenized as
            # String.Backtick, inline math as plain String, so the two
            # render distinctly.
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            # A control sequence: backslash + letters, or backslash + one
            # non-letter character; its optional arguments are handled in
            # the 'command' state.
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            (r'\\$', Keyword),      # lone backslash at the end of input
            include('general'),
            (r'[^\\$%&_^{}]+', Text),   # ordinary text run
        ],
        # Shared by 'inlinemath' and 'displaymath'.
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),  # closes \( ... \)
            (r'\$', String, '#pop'),    # closes $ ... $
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),  # closes \[ ... \]
            (r'\$\$', String, '#pop'),  # closes $$ ... $$
            (r'\$', Name.Builtin),      # stray single '$' inside display math
            include('math'),
        ],
        # Entered right after a control sequence: optional [...] argument
        # and starred variants; anything else pops back immediately.
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            default('#pop'),
        ],
    }

    def analyse_text(text):
        # Static scoring hook (no ``self``).  Any of these commands at the
        # very start of the file is a near-certain (La)TeX signal; True is
        # treated as confidence 1.0 by the lexer-guessing machinery.
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
+
+
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    .. versionadded:: 0.6
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1-9]', '*.man', '*.1p', '*.3pm']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            # A request: leading dot plus request name, then arguments.
            (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]+', Text, 'textline'),
            default('textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),        # \" comment to end of line
            (r'\\[fn]\w', String.Escape),   # font/number register, 1-char name
            (r'\\\(.{2}', String.Escape),   # special character, 2-char name
            (r'\\.\[.*\]', String.Escape),  # bracketed long-name form
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),     # line continuation
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        """Score the likelihood that *text* is (g)roff source.

        Static hook (no ``self``); returns True/False or a float
        confidence, per the ``Lexer.analyse_text`` protocol.
        """
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            # A troff comment as the very first thing: certain match.
            return True
        if text[:4] == '.TH ':
            # Manpage title header.
            return True
        # BUGFIX: use a slice instead of text[3] so that inputs shorter
        # than four characters (e.g. just ".TH" or ".ab") do not raise
        # IndexError -- out-of-range slices simply yield ''.
        if text[1:3].isalnum() and text[3:4].isspace():
            return 0.9
+
+
class MozPreprocHashLexer(RegexLexer):
    """
    Lexer for Mozilla Preprocessor files (with '#' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozhashpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            # '#' at the start of a line opens a preprocessor statement:
            # first the instruction name ('exprstart'), then the rest of
            # the line as an expression ('expr').
            (r'^#', Comment.Preproc, ('expr', 'exprstart')),
            # Everything else is passed through untouched.
            (r'.+', Other),
        ],
        'exprstart': [
            # 'literal' swallows the rest of the line verbatim; pop both
            # 'exprstart' and the pending 'expr' state.
            (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
            (words((
                'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
                'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
                'include', 'includesubst', 'error')),
             Comment.Preproc, '#pop'),
        ],
        'expr': [
            (words(('!', '!=', '==', '&&', '||')), Operator),
            (r'(defined)(\()', bygroups(Keyword, Punctuation)),
            (r'\)', Punctuation),
            (r'[0-9]+', Number.Decimal),
            (r'__\w+?__', Name.Variable),   # e.g. __FILE__-style variables
            (r'@\w+?@', Name.Class),        # @substitution@ variables
            (r'\w+', Name),
            (r'\n', Text, '#pop'),          # end of the directive line
            (r'\s+', Text),
            (r'\S', Punctuation),
        ],
    }
+
+
class MozPreprocPercentLexer(MozPreprocHashLexer):
    """
    Lexer for Mozilla Preprocessor files (with '%' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozpercentpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    # Only 'root' is overridden to swap the marker character; the
    # 'exprstart'/'expr' states it pushes are inherited from
    # MozPreprocHashLexer via RegexLexer's token-table merging.
    tokens = {
        'root': [
            (r'^%', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
    }
+
+
class MozPreprocXulLexer(DelegatingLexer):
    """
    Highlights XUL sources that are run through the Mozilla preprocessor:
    '#'-marked preprocessor statements are handled by `MozPreprocHashLexer`,
    and everything it leaves untouched is delegated to the `XmlLexer`.

    .. versionadded:: 2.0
    """
    name = "XUL+mozpreproc"
    aliases = ['xul+mozpreproc']
    filenames = ['*.xul.in']
    mimetypes = []

    def __init__(self, **options):
        # Root lexer: XML; language lexer: the '#' preprocessor dialect.
        DelegatingLexer.__init__(self, XmlLexer, MozPreprocHashLexer,
                                 **options)
+
+
class MozPreprocJavascriptLexer(DelegatingLexer):
    """
    Highlights JavaScript sources that are run through the Mozilla
    preprocessor: '#'-marked preprocessor statements are handled by
    `MozPreprocHashLexer`, and everything it leaves untouched is delegated
    to the `JavascriptLexer`.

    .. versionadded:: 2.0
    """
    name = "Javascript+mozpreproc"
    aliases = ['javascript+mozpreproc']
    filenames = ['*.js.in']
    mimetypes = []

    def __init__(self, **options):
        # Root lexer: JavaScript; language lexer: the '#' preprocessor
        # dialect.
        DelegatingLexer.__init__(self, JavascriptLexer, MozPreprocHashLexer,
                                 **options)
+
+
class MozPreprocCssLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocPercentLexer` that highlights unlexed data
    with the `CssLexer`.

    .. versionadded:: 2.0
    """
    name = "CSS+mozpreproc"
    aliases = ['css+mozpreproc']
    filenames = ['*.css.in']
    mimetypes = []

    def __init__(self, **options):
        # CSS uses the '%' marker variant (MozPreprocPercentLexer), since
        # '#' already has meaning in CSS (color literals, id selectors).
        super().__init__(CssLexer, MozPreprocPercentLexer, **options)
+
+
class MarkdownLexer(RegexLexer):
    """
    For Markdown markup.

    .. versionadded:: 2.2
    """
    name = 'Markdown'
    url = 'https://daringfireball.net/projects/markdown/'
    aliases = ['markdown', 'md']
    filenames = ['*.md', '*.markdown']
    mimetypes = ["text/x-markdown"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        Tokenize a fenced code block that carries a language tag, delegating
        the body to that language's lexer when one is registered.

        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String.Backtick, match.group(1)
        yield match.start(2), String.Backtick, match.group(2)
        yield match.start(3), Text , match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name( match.group(2).strip() )
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(4), String, code
        else:
            # Re-tokenize the fenced body with the language's own lexer.
            yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(5), String.Backtick, match.group(5)

    tokens = {
        'root': [
            # heading with '#' prefix (atx-style)
            (r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)),
            # subheading with '#' prefix (atx-style)
            (r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)),
            # heading with '=' underlines (Setext-style)
            (r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # subheading with '-' underlines (Setext-style)
            (r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)),
            # task list (must come before the plain bulleted-list rule)
            (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)',
             bygroups(Whitespace, Keyword, Keyword, using(this, state='inline'))),
            # bulleted list
            (r'^(\s*)([*-])(\s)(.+\n)',
             bygroups(Whitespace, Keyword, Whitespace, using(this, state='inline'))),
            # numbered list
            (r'^(\s*)([0-9]+\.)( .+\n)',
             bygroups(Whitespace, Keyword, using(this, state='inline'))),
            # quote
            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
            # code block fenced by 3 backticks
            (r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick),
            # code block with language
            (r'^(\s*```)(\w+)(\n)([\w\W]*?)(^\s*```$\n)', _handle_codeblock),

            include('inline'),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # inline code
            (r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)),
            # warning: the following rules eat outer tags.
            # eg. **foo _bar_ baz** => foo and baz are not recognized as bold
            # bold fenced by '**'
            (r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)),
            # bold fenced by '__'
            (r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)),
            # italics fenced by '*'
            (r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)),
            # italics fenced by '_'
            (r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)),
            # strikethrough
            (r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)),
            # mentions and topics (twitter and github stuff)
            (r'[@#][\w/:]+', Name.Entity),
            # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png)
            (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))',
             bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)),
            # reference-style links, e.g.:
            #   [an example][id]
            #   [id]: http://example.com/
            (r'(\[)([^]]+)(\])(\[)([^]]*)(\])',
             bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)),
            (r'^(\s*\[)([^]]*)(\]:\s*)(.+)',
             bygroups(Text, Name.Label, Text, Name.Attribute)),

            # general text, must come last!
            (r'[^\\\s]+', Text),
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        # 'handlecodeblocks' (default True): highlight fenced code blocks
        # with the lexer registered for their language tag.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
+
+
class TiddlyWiki5Lexer(RegexLexer):
    """
    For TiddlyWiki5 markup.

    .. versionadded:: 2.7
    """
    name = 'tiddler'
    url = 'https://tiddlywiki.com/#TiddlerFiles'
    aliases = ['tid']
    filenames = ['*.tid']
    mimetypes = ["text/vnd.tiddlywiki"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        Tokenize a fenced code block with a language tag, delegating the
        body to that language's lexer when one is registered.

        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)
        yield match.start(3), Text, match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(4), String, code
            return

        # Re-tokenize the fenced body with the language's own lexer.
        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(5), String, match.group(5)

    def _handle_cssblock(self, match):
        """
        Tokenize a ``<style>...</style>`` block, delegating its body to the
        CSS lexer when available.

        match args: 1:style tag 2:newline, 3:code, 4:closing style tag
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)

        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name('css')
            except ClassNotFound:
                pass
        code = match.group(3)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(3), String, code
            return

        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(4), String, match.group(4)

    tokens = {
        'root': [
            # title in metadata section
            (r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)),
            # headings ('!' markers; more '!' means deeper nesting)
            (r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)),
            (r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)),
            # bulleted or numbered lists or single-line block quotes
            # (can be mixed)
            (r'^(\s*)([*#>]+)(\s*)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # multi-line block quotes (<<< ... <<<)
            (r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)),
            # table header
            (r'^(\|.*?\|h)$', bygroups(Generic.Strong)),
            # table footer or caption
            (r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)),
            # table class
            (r'^(\|.*?\|k)$', bygroups(Name.Tag)),
            # definitions
            (r'^(;.*)$', bygroups(Generic.Strong)),
            # text block (plain fenced block, no language tag)
            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
            # code block with language
            (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock),
            # CSS style block
            (r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock),

            include('keywords'),
            include('inline'),
        ],
        'keywords': [
            # Tiddler metadata fields and pragma markers at line start.
            (words((
                '\\define', '\\end', 'caption', 'created', 'modified', 'tags',
                'title', 'type'), prefix=r'^', suffix=r'\b'),
             Keyword),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # created or modified date (17-digit timestamp)
            (r'\d{17}', Number.Integer),
            # italics
            (r'(\s)(//[^/]+//)((?=\W|\n))',
             bygroups(Text, Generic.Emph, Text)),
            # superscript
            (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)),
            # subscript
            (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)),
            # underscore
            (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)),
            # bold
            (r"(\s)(''[^']+'')((?=\W|\n))",
             bygroups(Text, Generic.Strong, Text)),
            # strikethrough
            (r'(\s)(~~[^~]+~~)((?=\W|\n))',
             bygroups(Text, Generic.Deleted, Text)),
            # TiddlyWiki variables
            (r'<<[^>]+>>', Name.Tag),
            (r'\$\$[^$]+\$\$', Name.Tag),
            (r'\$\([^)]+\)\$', Name.Tag),
            # TiddlyWiki style or class
            (r'^@@.*$', Name.Tag),
            # HTML tags
            (r'</?[^>]+>', Name.Tag),
            # inline code
            (r'`[^`]+`', String.Backtick),
            # HTML escaped symbols
            (r'&\S*?;', String.Regex),
            # Wiki links ([[target]]; the '|' variant below never matches
            # here because '|' is excluded from the character class)
            (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)),
            # External links ([[label|target]])
            (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})',
             bygroups(Text, Name.Tag, Text, Name.Attribute, Text)),
            # Transclusion
            (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)),
            # URLs (loose match for http/https/ftp/ftps-style schemes)
            (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)),

            # general text, must come last!
            (r'[\w]+', Text),
            (r'.', Text)
        ],
    }

    def __init__(self, **options):
        # 'handlecodeblocks' (default True): highlight fenced code and
        # <style> blocks with the matching lexer.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py
new file mode 100644
index 0000000..22144fa
--- /dev/null
+++ b/pygments/lexers/math.py
@@ -0,0 +1,20 @@
+"""
+ pygments.lexers.math
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Just export lexers that were contained in this module.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.python import NumPyLexer
+from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \
+ OctaveLexer, ScilabLexer
+from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer
+from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer
+from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer
+from pygments.lexers.idl import IDLLexer
+from pygments.lexers.algebra import MuPADLexer
+
+__all__ = []
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
new file mode 100644
index 0000000..058661e
--- /dev/null
+++ b/pygments/lexers/matlab.py
@@ -0,0 +1,3308 @@
+"""
+ pygments.lexers.matlab
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Matlab and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, default, words, \
+ do_insertions, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+from pygments.lexers import _scilab_builtins
+
+__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']
+
+
+class MatlabLexer(RegexLexer):
+ """
+ For Matlab source code.
+
+ .. versionadded:: 0.10
+ """
+ name = 'Matlab'
+ aliases = ['matlab']
+ filenames = ['*.m']
+ mimetypes = ['text/matlab']
+
+ _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\./|/|\\'
+
+ tokens = {
+ 'expressions': [
+ # operators:
+ (_operators, Operator),
+
+ # numbers (must come before punctuation to handle `.5`; cannot use
+ # `\b` due to e.g. `5. + .5`). The negative lookahead on operators
+ # avoids including the dot in `1./x` (the dot is part of `./`).
+ (r'(?<!\w)((\d+\.\d+)|(\d*\.\d+)|(\d+\.(?!%s)))'
+ r'([eEf][+-]?\d+)?(?!\w)' % _operators, Number.Float),
+ (r'\b\d+[eEf][+-]?[0-9]+\b', Number.Float),
+ (r'\b\d+\b', Number.Integer),
+
+ # punctuation:
+ (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
+ (r'=|:|;', Punctuation),
+
+ # quote can be transpose, instead of string:
+ # (not great, but handles common cases...)
+ (r'(?<=[\w)\].])\'+', Operator),
+
+ (r'"(""|[^"])*"', String),
+
+ (r'(?<![\w)\].])\'', String, 'string'),
+ (r'[a-zA-Z_]\w*', Name),
+ (r'\s+', Whitespace),
+ (r'.', Text),
+ ],
+ 'root': [
+ # line starting with '!' is sent as a system command. not sure what
+ # label to use...
+ (r'^!.*', String.Other),
+ (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
+ (r'%.*$', Comment),
+ (r'(\s*^\s*)(function)\b', bygroups(Whitespace, Keyword), 'deffunc'),
+ (r'(\s*^\s*)(properties)(\s+)(\()',
+ bygroups(Whitespace, Keyword, Whitespace, Punctuation),
+ ('defprops', 'propattrs')),
+ (r'(\s*^\s*)(properties)\b',
+ bygroups(Whitespace, Keyword), 'defprops'),
+
+ # from 'iskeyword' on version 9.4 (R2018a):
+ # Check that there is no preceding dot, as keywords are valid field
+ # names.
+ (words(('break', 'case', 'catch', 'classdef', 'continue',
+ 'dynamicprops', 'else', 'elseif', 'end', 'for', 'function',
+ 'global', 'if', 'methods', 'otherwise', 'parfor',
+ 'persistent', 'return', 'spmd', 'switch',
+ 'try', 'while'),
+ prefix=r'(?<!\.)(\s*)(', suffix=r')\b'),
+ bygroups(Whitespace, Keyword)),
+
+ (
+ words(
+ [
+ # See https://mathworks.com/help/matlab/referencelist.html
+ # Below data from 2021-02-10T18:24:08Z
+ # for Matlab release R2020b
+ "BeginInvoke",
+ "COM",
+ "Combine",
+ "CombinedDatastore",
+ "EndInvoke",
+ "Execute",
+ "FactoryGroup",
+ "FactorySetting",
+ "Feval",
+ "FunctionTestCase",
+ "GetCharArray",
+ "GetFullMatrix",
+ "GetVariable",
+ "GetWorkspaceData",
+ "GraphPlot",
+ "H5.close",
+ "H5.garbage_collect",
+ "H5.get_libversion",
+ "H5.open",
+ "H5.set_free_list_limits",
+ "H5A.close",
+ "H5A.create",
+ "H5A.delete",
+ "H5A.get_info",
+ "H5A.get_name",
+ "H5A.get_space",
+ "H5A.get_type",
+ "H5A.iterate",
+ "H5A.open",
+ "H5A.open_by_idx",
+ "H5A.open_by_name",
+ "H5A.read",
+ "H5A.write",
+ "H5D.close",
+ "H5D.create",
+ "H5D.get_access_plist",
+ "H5D.get_create_plist",
+ "H5D.get_offset",
+ "H5D.get_space",
+ "H5D.get_space_status",
+ "H5D.get_storage_size",
+ "H5D.get_type",
+ "H5D.open",
+ "H5D.read",
+ "H5D.set_extent",
+ "H5D.vlen_get_buf_size",
+ "H5D.write",
+ "H5DS.attach_scale",
+ "H5DS.detach_scale",
+ "H5DS.get_label",
+ "H5DS.get_num_scales",
+ "H5DS.get_scale_name",
+ "H5DS.is_scale",
+ "H5DS.iterate_scales",
+ "H5DS.set_label",
+ "H5DS.set_scale",
+ "H5E.clear",
+ "H5E.get_major",
+ "H5E.get_minor",
+ "H5E.walk",
+ "H5F.close",
+ "H5F.create",
+ "H5F.flush",
+ "H5F.get_access_plist",
+ "H5F.get_create_plist",
+ "H5F.get_filesize",
+ "H5F.get_freespace",
+ "H5F.get_info",
+ "H5F.get_mdc_config",
+ "H5F.get_mdc_hit_rate",
+ "H5F.get_mdc_size",
+ "H5F.get_name",
+ "H5F.get_obj_count",
+ "H5F.get_obj_ids",
+ "H5F.is_hdf5",
+ "H5F.mount",
+ "H5F.open",
+ "H5F.reopen",
+ "H5F.set_mdc_config",
+ "H5F.unmount",
+ "H5G.close",
+ "H5G.create",
+ "H5G.get_info",
+ "H5G.open",
+ "H5I.dec_ref",
+ "H5I.get_file_id",
+ "H5I.get_name",
+ "H5I.get_ref",
+ "H5I.get_type",
+ "H5I.inc_ref",
+ "H5I.is_valid",
+ "H5L.copy",
+ "H5L.create_external",
+ "H5L.create_hard",
+ "H5L.create_soft",
+ "H5L.delete",
+ "H5L.exists",
+ "H5L.get_info",
+ "H5L.get_name_by_idx",
+ "H5L.get_val",
+ "H5L.iterate",
+ "H5L.iterate_by_name",
+ "H5L.move",
+ "H5L.visit",
+ "H5L.visit_by_name",
+ "H5ML.compare_values",
+ "H5ML.get_constant_names",
+ "H5ML.get_constant_value",
+ "H5ML.get_function_names",
+ "H5ML.get_mem_datatype",
+ "H5O.close",
+ "H5O.copy",
+ "H5O.get_comment",
+ "H5O.get_comment_by_name",
+ "H5O.get_info",
+ "H5O.link",
+ "H5O.open",
+ "H5O.open_by_idx",
+ "H5O.set_comment",
+ "H5O.set_comment_by_name",
+ "H5O.visit",
+ "H5O.visit_by_name",
+ "H5P.all_filters_avail",
+ "H5P.close",
+ "H5P.close_class",
+ "H5P.copy",
+ "H5P.create",
+ "H5P.equal",
+ "H5P.exist",
+ "H5P.fill_value_defined",
+ "H5P.get",
+ "H5P.get_alignment",
+ "H5P.get_alloc_time",
+ "H5P.get_attr_creation_order",
+ "H5P.get_attr_phase_change",
+ "H5P.get_btree_ratios",
+ "H5P.get_char_encoding",
+ "H5P.get_chunk",
+ "H5P.get_chunk_cache",
+ "H5P.get_class",
+ "H5P.get_class_name",
+ "H5P.get_class_parent",
+ "H5P.get_copy_object",
+ "H5P.get_create_intermediate_group",
+ "H5P.get_driver",
+ "H5P.get_edc_check",
+ "H5P.get_external",
+ "H5P.get_external_count",
+ "H5P.get_family_offset",
+ "H5P.get_fapl_core",
+ "H5P.get_fapl_family",
+ "H5P.get_fapl_multi",
+ "H5P.get_fclose_degree",
+ "H5P.get_fill_time",
+ "H5P.get_fill_value",
+ "H5P.get_filter",
+ "H5P.get_filter_by_id",
+ "H5P.get_gc_references",
+ "H5P.get_hyper_vector_size",
+ "H5P.get_istore_k",
+ "H5P.get_layout",
+ "H5P.get_libver_bounds",
+ "H5P.get_link_creation_order",
+ "H5P.get_link_phase_change",
+ "H5P.get_mdc_config",
+ "H5P.get_meta_block_size",
+ "H5P.get_multi_type",
+ "H5P.get_nfilters",
+ "H5P.get_nprops",
+ "H5P.get_sieve_buf_size",
+ "H5P.get_size",
+ "H5P.get_sizes",
+ "H5P.get_small_data_block_size",
+ "H5P.get_sym_k",
+ "H5P.get_userblock",
+ "H5P.get_version",
+ "H5P.isa_class",
+ "H5P.iterate",
+ "H5P.modify_filter",
+ "H5P.remove_filter",
+ "H5P.set",
+ "H5P.set_alignment",
+ "H5P.set_alloc_time",
+ "H5P.set_attr_creation_order",
+ "H5P.set_attr_phase_change",
+ "H5P.set_btree_ratios",
+ "H5P.set_char_encoding",
+ "H5P.set_chunk",
+ "H5P.set_chunk_cache",
+ "H5P.set_copy_object",
+ "H5P.set_create_intermediate_group",
+ "H5P.set_deflate",
+ "H5P.set_edc_check",
+ "H5P.set_external",
+ "H5P.set_family_offset",
+ "H5P.set_fapl_core",
+ "H5P.set_fapl_family",
+ "H5P.set_fapl_log",
+ "H5P.set_fapl_multi",
+ "H5P.set_fapl_sec2",
+ "H5P.set_fapl_split",
+ "H5P.set_fapl_stdio",
+ "H5P.set_fclose_degree",
+ "H5P.set_fill_time",
+ "H5P.set_fill_value",
+ "H5P.set_filter",
+ "H5P.set_fletcher32",
+ "H5P.set_gc_references",
+ "H5P.set_hyper_vector_size",
+ "H5P.set_istore_k",
+ "H5P.set_layout",
+ "H5P.set_libver_bounds",
+ "H5P.set_link_creation_order",
+ "H5P.set_link_phase_change",
+ "H5P.set_mdc_config",
+ "H5P.set_meta_block_size",
+ "H5P.set_multi_type",
+ "H5P.set_nbit",
+ "H5P.set_scaleoffset",
+ "H5P.set_shuffle",
+ "H5P.set_sieve_buf_size",
+ "H5P.set_sizes",
+ "H5P.set_small_data_block_size",
+ "H5P.set_sym_k",
+ "H5P.set_userblock",
+ "H5R.create",
+ "H5R.dereference",
+ "H5R.get_name",
+ "H5R.get_obj_type",
+ "H5R.get_region",
+ "H5S.close",
+ "H5S.copy",
+ "H5S.create",
+ "H5S.create_simple",
+ "H5S.extent_copy",
+ "H5S.get_select_bounds",
+ "H5S.get_select_elem_npoints",
+ "H5S.get_select_elem_pointlist",
+ "H5S.get_select_hyper_blocklist",
+ "H5S.get_select_hyper_nblocks",
+ "H5S.get_select_npoints",
+ "H5S.get_select_type",
+ "H5S.get_simple_extent_dims",
+ "H5S.get_simple_extent_ndims",
+ "H5S.get_simple_extent_npoints",
+ "H5S.get_simple_extent_type",
+ "H5S.is_simple",
+ "H5S.offset_simple",
+ "H5S.select_all",
+ "H5S.select_elements",
+ "H5S.select_hyperslab",
+ "H5S.select_none",
+ "H5S.select_valid",
+ "H5S.set_extent_none",
+ "H5S.set_extent_simple",
+ "H5T.array_create",
+ "H5T.close",
+ "H5T.commit",
+ "H5T.committed",
+ "H5T.copy",
+ "H5T.create",
+ "H5T.detect_class",
+ "H5T.enum_create",
+ "H5T.enum_insert",
+ "H5T.enum_nameof",
+ "H5T.enum_valueof",
+ "H5T.equal",
+ "H5T.get_array_dims",
+ "H5T.get_array_ndims",
+ "H5T.get_class",
+ "H5T.get_create_plist",
+ "H5T.get_cset",
+ "H5T.get_ebias",
+ "H5T.get_fields",
+ "H5T.get_inpad",
+ "H5T.get_member_class",
+ "H5T.get_member_index",
+ "H5T.get_member_name",
+ "H5T.get_member_offset",
+ "H5T.get_member_type",
+ "H5T.get_member_value",
+ "H5T.get_native_type",
+ "H5T.get_nmembers",
+ "H5T.get_norm",
+ "H5T.get_offset",
+ "H5T.get_order",
+ "H5T.get_pad",
+ "H5T.get_precision",
+ "H5T.get_sign",
+ "H5T.get_size",
+ "H5T.get_strpad",
+ "H5T.get_super",
+ "H5T.get_tag",
+ "H5T.insert",
+ "H5T.is_variable_str",
+ "H5T.lock",
+ "H5T.open",
+ "H5T.pack",
+ "H5T.set_cset",
+ "H5T.set_ebias",
+ "H5T.set_fields",
+ "H5T.set_inpad",
+ "H5T.set_norm",
+ "H5T.set_offset",
+ "H5T.set_order",
+ "H5T.set_pad",
+ "H5T.set_precision",
+ "H5T.set_sign",
+ "H5T.set_size",
+ "H5T.set_strpad",
+ "H5T.set_tag",
+ "H5T.vlen_create",
+ "H5Z.filter_avail",
+ "H5Z.get_filter_info",
+ "Inf",
+ "KeyValueDatastore",
+ "KeyValueStore",
+ "MException",
+ "MException.last",
+ "MaximizeCommandWindow",
+ "MemoizedFunction",
+ "MinimizeCommandWindow",
+ "NET",
+ "NET.Assembly",
+ "NET.GenericClass",
+ "NET.NetException",
+ "NET.addAssembly",
+ "NET.convertArray",
+ "NET.createArray",
+ "NET.createGeneric",
+ "NET.disableAutoRelease",
+ "NET.enableAutoRelease",
+ "NET.invokeGenericMethod",
+ "NET.isNETSupported",
+ "NET.setStaticProperty",
+ "NaN",
+ "NaT",
+ "OperationResult",
+ "PutCharArray",
+ "PutFullMatrix",
+ "PutWorkspaceData",
+ "PythonEnvironment",
+ "Quit",
+ "RandStream",
+ "ReleaseCompatibilityException",
+ "ReleaseCompatibilityResults",
+ "Remove",
+ "RemoveAll",
+ "Setting",
+ "SettingsGroup",
+ "TallDatastore",
+ "Test",
+ "TestResult",
+ "Tiff",
+ "TransformedDatastore",
+ "ValueIterator",
+ "VersionResults",
+ "VideoReader",
+ "VideoWriter",
+ "abs",
+ "accumarray",
+ "acos",
+ "acosd",
+ "acosh",
+ "acot",
+ "acotd",
+ "acoth",
+ "acsc",
+ "acscd",
+ "acsch",
+ "actxGetRunningServer",
+ "actxserver",
+ "add",
+ "addCause",
+ "addCorrection",
+ "addFile",
+ "addFolderIncludingChildFiles",
+ "addGroup",
+ "addLabel",
+ "addPath",
+ "addReference",
+ "addSetting",
+ "addShortcut",
+ "addShutdownFile",
+ "addStartupFile",
+ "addStyle",
+ "addToolbarExplorationButtons",
+ "addboundary",
+ "addcats",
+ "addedge",
+ "addevent",
+ "addlistener",
+ "addmulti",
+ "addnode",
+ "addpath",
+ "addpoints",
+ "addpref",
+ "addprop",
+ "addsample",
+ "addsampletocollection",
+ "addtodate",
+ "addts",
+ "addvars",
+ "adjacency",
+ "airy",
+ "align",
+ "alim",
+ "all",
+ "allchild",
+ "alpha",
+ "alphaShape",
+ "alphaSpectrum",
+ "alphaTriangulation",
+ "alphamap",
+ "alphanumericBoundary",
+ "alphanumericsPattern",
+ "amd",
+ "analyzeCodeCompatibility",
+ "ancestor",
+ "angle",
+ "animatedline",
+ "annotation",
+ "ans",
+ "any",
+ "appdesigner",
+ "append",
+ "area",
+ "arguments",
+ "array2table",
+ "array2timetable",
+ "arrayDatastore",
+ "arrayfun",
+ "asFewOfPattern",
+ "asManyOfPattern",
+ "ascii",
+ "asec",
+ "asecd",
+ "asech",
+ "asin",
+ "asind",
+ "asinh",
+ "assert",
+ "assignin",
+ "atan",
+ "atan2",
+ "atan2d",
+ "atand",
+ "atanh",
+ "audiodevinfo",
+ "audiodevreset",
+ "audioinfo",
+ "audioplayer",
+ "audioread",
+ "audiorecorder",
+ "audiowrite",
+ "autumn",
+ "axes",
+ "axis",
+ "axtoolbar",
+ "axtoolbarbtn",
+ "balance",
+ "bandwidth",
+ "bar",
+ "bar3",
+ "bar3h",
+ "barh",
+ "barycentricToCartesian",
+ "base2dec",
+ "batchStartupOptionUsed",
+ "bctree",
+ "beep",
+ "bench",
+ "besselh",
+ "besseli",
+ "besselj",
+ "besselk",
+ "bessely",
+ "beta",
+ "betainc",
+ "betaincinv",
+ "betaln",
+ "between",
+ "bfsearch",
+ "bicg",
+ "bicgstab",
+ "bicgstabl",
+ "biconncomp",
+ "bin2dec",
+ "binary",
+ "binscatter",
+ "bitand",
+ "bitcmp",
+ "bitget",
+ "bitnot",
+ "bitor",
+ "bitset",
+ "bitshift",
+ "bitxor",
+ "blanks",
+ "ble",
+ "blelist",
+ "blkdiag",
+ "bluetooth",
+ "bluetoothlist",
+ "bone",
+ "boundary",
+ "boundaryFacets",
+ "boundaryshape",
+ "boundingbox",
+ "bounds",
+ "box",
+ "boxchart",
+ "brighten",
+ "brush",
+ "bsxfun",
+ "bubblechart",
+ "bubblechart3",
+ "bubblelegend",
+ "bubblelim",
+ "bubblesize",
+ "builddocsearchdb",
+ "builtin",
+ "bvp4c",
+ "bvp5c",
+ "bvpget",
+ "bvpinit",
+ "bvpset",
+ "bvpxtend",
+ "caldays",
+ "caldiff",
+ "calendar",
+ "calendarDuration",
+ "calllib",
+ "calmonths",
+ "calquarters",
+ "calweeks",
+ "calyears",
+ "camdolly",
+ "cameratoolbar",
+ "camlight",
+ "camlookat",
+ "camorbit",
+ "campan",
+ "campos",
+ "camproj",
+ "camroll",
+ "camtarget",
+ "camup",
+ "camva",
+ "camzoom",
+ "canUseGPU",
+ "canUseParallelPool",
+ "cart2pol",
+ "cart2sph",
+ "cartesianToBarycentric",
+ "caseInsensitivePattern",
+ "caseSensitivePattern",
+ "cast",
+ "cat",
+ "categorical",
+ "categories",
+ "caxis",
+ "cd",
+ "cdf2rdf",
+ "cdfepoch",
+ "cdfinfo",
+ "cdflib",
+ "cdfread",
+ "ceil",
+ "cell",
+ "cell2mat",
+ "cell2struct",
+ "cell2table",
+ "celldisp",
+ "cellfun",
+ "cellplot",
+ "cellstr",
+ "centrality",
+ "centroid",
+ "cgs",
+ "char",
+ "characterListPattern",
+ "characteristic",
+ "checkcode",
+ "chol",
+ "cholupdate",
+ "choose",
+ "chooseContextMenu",
+ "circshift",
+ "circumcenter",
+ "cla",
+ "clabel",
+ "class",
+ "classUnderlying",
+ "clc",
+ "clear",
+ "clearAllMemoizedCaches",
+ "clearPersonalValue",
+ "clearTemporaryValue",
+ "clearpoints",
+ "clearvars",
+ "clf",
+ "clibArray",
+ "clibConvertArray",
+ "clibIsNull",
+ "clibIsReadOnly",
+ "clibRelease",
+ "clibgen.buildInterface",
+ "clibgen.generateLibraryDefinition",
+ "clipboard",
+ "clock",
+ "clone",
+ "close",
+ "closeFile",
+ "closereq",
+ "cmap2gray",
+ "cmpermute",
+ "cmunique",
+ "codeCompatibilityReport",
+ "colamd",
+ "collapse",
+ "colon",
+ "colorbar",
+ "colorcube",
+ "colormap",
+ "colororder",
+ "colperm",
+ "com.mathworks.engine.MatlabEngine",
+ "com.mathworks.matlab.types.CellStr",
+ "com.mathworks.matlab.types.Complex",
+ "com.mathworks.matlab.types.HandleObject",
+ "com.mathworks.matlab.types.Struct",
+ "combine",
+ "comet",
+ "comet3",
+ "compan",
+ "compass",
+ "complex",
+ "compose",
+ "computer",
+ "comserver",
+ "cond",
+ "condeig",
+ "condensation",
+ "condest",
+ "coneplot",
+ "configureCallback",
+ "configureTerminator",
+ "conj",
+ "conncomp",
+ "containers.Map",
+ "contains",
+ "containsrange",
+ "contour",
+ "contour3",
+ "contourc",
+ "contourf",
+ "contourslice",
+ "contrast",
+ "conv",
+ "conv2",
+ "convertCharsToStrings",
+ "convertContainedStringsToChars",
+ "convertStringsToChars",
+ "convertTo",
+ "convertvars",
+ "convexHull",
+ "convhull",
+ "convhulln",
+ "convn",
+ "cool",
+ "copper",
+ "copyHDU",
+ "copyfile",
+ "copygraphics",
+ "copyobj",
+ "corrcoef",
+ "cos",
+ "cosd",
+ "cosh",
+ "cospi",
+ "cot",
+ "cotd",
+ "coth",
+ "count",
+ "countcats",
+ "cov",
+ "cplxpair",
+ "cputime",
+ "createCategory",
+ "createFile",
+ "createImg",
+ "createLabel",
+ "createTbl",
+ "criticalAlpha",
+ "cross",
+ "csc",
+ "cscd",
+ "csch",
+ "ctranspose",
+ "cummax",
+ "cummin",
+ "cumprod",
+ "cumsum",
+ "cumtrapz",
+ "curl",
+ "currentProject",
+ "cylinder",
+ "daspect",
+ "dataTipInteraction",
+ "dataTipTextRow",
+ "datacursormode",
+ "datastore",
+ "datatip",
+ "date",
+ "datenum",
+ "dateshift",
+ "datestr",
+ "datetick",
+ "datetime",
+ "datevec",
+ "day",
+ "days",
+ "dbclear",
+ "dbcont",
+ "dbdown",
+ "dbmex",
+ "dbquit",
+ "dbstack",
+ "dbstatus",
+ "dbstep",
+ "dbstop",
+ "dbtype",
+ "dbup",
+ "dde23",
+ "ddeget",
+ "ddensd",
+ "ddesd",
+ "ddeset",
+ "deblank",
+ "dec2base",
+ "dec2bin",
+ "dec2hex",
+ "decic",
+ "decomposition",
+ "deconv",
+ "deg2rad",
+ "degree",
+ "del2",
+ "delaunay",
+ "delaunayTriangulation",
+ "delaunayn",
+ "delete",
+ "deleteCol",
+ "deleteFile",
+ "deleteHDU",
+ "deleteKey",
+ "deleteRecord",
+ "deleteRows",
+ "delevent",
+ "delimitedTextImportOptions",
+ "delsample",
+ "delsamplefromcollection",
+ "demo",
+ "descriptor",
+ "det",
+ "details",
+ "detectImportOptions",
+ "detrend",
+ "deval",
+ "dfsearch",
+ "diag",
+ "dialog",
+ "diary",
+ "diff",
+ "diffuse",
+ "digitBoundary",
+ "digitsPattern",
+ "digraph",
+ "dir",
+ "disableDefaultInteractivity",
+ "discretize",
+ "disp",
+ "display",
+ "dissect",
+ "distances",
+ "dither",
+ "divergence",
+ "dmperm",
+ "doc",
+ "docsearch",
+ "dos",
+ "dot",
+ "double",
+ "drag",
+ "dragrect",
+ "drawnow",
+ "dsearchn",
+ "duration",
+ "dynamicprops",
+ "echo",
+ "echodemo",
+ "echotcpip",
+ "edgeAttachments",
+ "edgecount",
+ "edges",
+ "edit",
+ "eig",
+ "eigs",
+ "ellipj",
+ "ellipke",
+ "ellipsoid",
+ "empty",
+ "enableDefaultInteractivity",
+ "enableLegacyExplorationModes",
+ "enableNETfromNetworkDrive",
+ "enableservice",
+ "endsWith",
+ "enumeration",
+ "eomday",
+ "eps",
+ "eq",
+ "equilibrate",
+ "erase",
+ "eraseBetween",
+ "erf",
+ "erfc",
+ "erfcinv",
+ "erfcx",
+ "erfinv",
+ "error",
+ "errorbar",
+ "errordlg",
+ "etime",
+ "etree",
+ "etreeplot",
+ "eval",
+ "evalc",
+ "evalin",
+ "event.ClassInstanceEvent",
+ "event.DynamicPropertyEvent",
+ "event.EventData",
+ "event.PropertyEvent",
+ "event.hasListener",
+ "event.listener",
+ "event.proplistener",
+ "eventlisteners",
+ "events",
+ "exceltime",
+ "exist",
+ "exit",
+ "exp",
+ "expand",
+ "expint",
+ "expm",
+ "expm1",
+ "export",
+ "export2wsdlg",
+ "exportapp",
+ "exportgraphics",
+ "exportsetupdlg",
+ "extract",
+ "extractAfter",
+ "extractBefore",
+ "extractBetween",
+ "eye",
+ "ezpolar",
+ "faceNormal",
+ "factor",
+ "factorial",
+ "false",
+ "fclose",
+ "fcontour",
+ "feather",
+ "featureEdges",
+ "feof",
+ "ferror",
+ "feval",
+ "fewerbins",
+ "fft",
+ "fft2",
+ "fftn",
+ "fftshift",
+ "fftw",
+ "fgetl",
+ "fgets",
+ "fieldnames",
+ "figure",
+ "figurepalette",
+ "fileDatastore",
+ "fileMode",
+ "fileName",
+ "fileattrib",
+ "filemarker",
+ "fileparts",
+ "fileread",
+ "filesep",
+ "fill",
+ "fill3",
+ "fillmissing",
+ "filloutliers",
+ "filter",
+ "filter2",
+ "fimplicit",
+ "fimplicit3",
+ "find",
+ "findCategory",
+ "findEvent",
+ "findFile",
+ "findLabel",
+ "findall",
+ "findedge",
+ "findfigs",
+ "findgroups",
+ "findnode",
+ "findobj",
+ "findprop",
+ "finish",
+ "fitsdisp",
+ "fitsinfo",
+ "fitsread",
+ "fitswrite",
+ "fix",
+ "fixedWidthImportOptions",
+ "flag",
+ "flintmax",
+ "flip",
+ "flipedge",
+ "fliplr",
+ "flipud",
+ "floor",
+ "flow",
+ "flush",
+ "fmesh",
+ "fminbnd",
+ "fminsearch",
+ "fopen",
+ "format",
+ "fplot",
+ "fplot3",
+ "fprintf",
+ "frame2im",
+ "fread",
+ "freeBoundary",
+ "freqspace",
+ "frewind",
+ "fscanf",
+ "fseek",
+ "fsurf",
+ "ftell",
+ "ftp",
+ "full",
+ "fullfile",
+ "func2str",
+ "function_handle",
+ "functions",
+ "functiontests",
+ "funm",
+ "fwrite",
+ "fzero",
+ "gallery",
+ "gamma",
+ "gammainc",
+ "gammaincinv",
+ "gammaln",
+ "gather",
+ "gca",
+ "gcbf",
+ "gcbo",
+ "gcd",
+ "gcf",
+ "gcmr",
+ "gco",
+ "genpath",
+ "geoaxes",
+ "geobasemap",
+ "geobubble",
+ "geodensityplot",
+ "geolimits",
+ "geoplot",
+ "geoscatter",
+ "geotickformat",
+ "get",
+ "getAColParms",
+ "getAxes",
+ "getBColParms",
+ "getColName",
+ "getColType",
+ "getColorbar",
+ "getConstantValue",
+ "getEqColType",
+ "getFileFormats",
+ "getHDUnum",
+ "getHDUtype",
+ "getHdrSpace",
+ "getImgSize",
+ "getImgType",
+ "getLayout",
+ "getLegend",
+ "getMockHistory",
+ "getNumCols",
+ "getNumHDUs",
+ "getNumInputs",
+ "getNumInputsImpl",
+ "getNumOutputs",
+ "getNumOutputsImpl",
+ "getNumRows",
+ "getOpenFiles",
+ "getProfiles",
+ "getPropertyGroupsImpl",
+ "getReport",
+ "getTimeStr",
+ "getVersion",
+ "getabstime",
+ "getappdata",
+ "getaudiodata",
+ "getdatasamples",
+ "getdatasamplesize",
+ "getenv",
+ "getfield",
+ "getframe",
+ "getinterpmethod",
+ "getnext",
+ "getpinstatus",
+ "getpixelposition",
+ "getplayer",
+ "getpoints",
+ "getpref",
+ "getqualitydesc",
+ "getrangefromclass",
+ "getsamples",
+ "getsampleusingtime",
+ "gettimeseriesnames",
+ "gettsafteratevent",
+ "gettsafterevent",
+ "gettsatevent",
+ "gettsbeforeatevent",
+ "gettsbeforeevent",
+ "gettsbetweenevents",
+ "getvaropts",
+ "ginput",
+ "gmres",
+ "gobjects",
+ "gplot",
+ "grabcode",
+ "gradient",
+ "graph",
+ "gray",
+ "grid",
+ "griddata",
+ "griddatan",
+ "griddedInterpolant",
+ "groot",
+ "groupcounts",
+ "groupfilter",
+ "groupsummary",
+ "grouptransform",
+ "gsvd",
+ "gtext",
+ "guidata",
+ "guide",
+ "guihandles",
+ "gunzip",
+ "gzip",
+ "h5create",
+ "h5disp",
+ "h5info",
+ "h5read",
+ "h5readatt",
+ "h5write",
+ "h5writeatt",
+ "hadamard",
+ "handle",
+ "hankel",
+ "hasFactoryValue",
+ "hasFrame",
+ "hasGroup",
+ "hasPersonalValue",
+ "hasSetting",
+ "hasTemporaryValue",
+ "hasdata",
+ "hasnext",
+ "hdfan",
+ "hdfdf24",
+ "hdfdfr8",
+ "hdfh",
+ "hdfhd",
+ "hdfhe",
+ "hdfhx",
+ "hdfinfo",
+ "hdfml",
+ "hdfpt",
+ "hdfread",
+ "hdfv",
+ "hdfvf",
+ "hdfvh",
+ "hdfvs",
+ "head",
+ "heatmap",
+ "height",
+ "help",
+ "helpdlg",
+ "hess",
+ "hex2dec",
+ "hex2num",
+ "hgexport",
+ "hggroup",
+ "hgtransform",
+ "hidden",
+ "highlight",
+ "hilb",
+ "histcounts",
+ "histcounts2",
+ "histogram",
+ "histogram2",
+ "hms",
+ "hold",
+ "holes",
+ "home",
+ "horzcat",
+ "hot",
+ "hour",
+ "hours",
+ "hover",
+ "hsv",
+ "hsv2rgb",
+ "hypot",
+ "i",
+ "ichol",
+ "idealfilter",
+ "idivide",
+ "ifft",
+ "ifft2",
+ "ifftn",
+ "ifftshift",
+ "ilu",
+ "im2double",
+ "im2frame",
+ "im2gray",
+ "im2java",
+ "imag",
+ "image",
+ "imageDatastore",
+ "imagesc",
+ "imapprox",
+ "imfinfo",
+ "imformats",
+ "imgCompress",
+ "import",
+ "importdata",
+ "imread",
+ "imresize",
+ "imshow",
+ "imtile",
+ "imwrite",
+ "inShape",
+ "incenter",
+ "incidence",
+ "ind2rgb",
+ "ind2sub",
+ "indegree",
+ "inedges",
+ "infoImpl",
+ "inmem",
+ "inner2outer",
+ "innerjoin",
+ "inpolygon",
+ "input",
+ "inputParser",
+ "inputdlg",
+ "inputname",
+ "insertATbl",
+ "insertAfter",
+ "insertBTbl",
+ "insertBefore",
+ "insertCol",
+ "insertImg",
+ "insertRows",
+ "int16",
+ "int2str",
+ "int32",
+ "int64",
+ "int8",
+ "integral",
+ "integral2",
+ "integral3",
+ "interp1",
+ "interp2",
+ "interp3",
+ "interpft",
+ "interpn",
+ "interpstreamspeed",
+ "intersect",
+ "intmax",
+ "intmin",
+ "inv",
+ "invhilb",
+ "ipermute",
+ "iqr",
+ "isCompressedImg",
+ "isConnected",
+ "isDiscreteStateSpecificationMutableImpl",
+ "isDone",
+ "isDoneImpl",
+ "isInactivePropertyImpl",
+ "isInputComplexityMutableImpl",
+ "isInputDataTypeMutableImpl",
+ "isInputSizeMutableImpl",
+ "isInterior",
+ "isKey",
+ "isLoaded",
+ "isLocked",
+ "isMATLABReleaseOlderThan",
+ "isPartitionable",
+ "isShuffleable",
+ "isStringScalar",
+ "isTunablePropertyDataTypeMutableImpl",
+ "isUnderlyingType",
+ "isa",
+ "isaUnderlying",
+ "isappdata",
+ "isbanded",
+ "isbetween",
+ "iscalendarduration",
+ "iscategorical",
+ "iscategory",
+ "iscell",
+ "iscellstr",
+ "ischange",
+ "ischar",
+ "iscolumn",
+ "iscom",
+ "isdag",
+ "isdatetime",
+ "isdiag",
+ "isdst",
+ "isduration",
+ "isempty",
+ "isenum",
+ "isequal",
+ "isequaln",
+ "isevent",
+ "isfield",
+ "isfile",
+ "isfinite",
+ "isfloat",
+ "isfolder",
+ "isgraphics",
+ "ishandle",
+ "ishermitian",
+ "ishold",
+ "ishole",
+ "isinf",
+ "isinteger",
+ "isinterface",
+ "isinterior",
+ "isisomorphic",
+ "isjava",
+ "iskeyword",
+ "isletter",
+ "islocalmax",
+ "islocalmin",
+ "islogical",
+ "ismac",
+ "ismatrix",
+ "ismember",
+ "ismembertol",
+ "ismethod",
+ "ismissing",
+ "ismultigraph",
+ "isnan",
+ "isnat",
+ "isnumeric",
+ "isobject",
+ "isocaps",
+ "isocolors",
+ "isomorphism",
+ "isonormals",
+ "isordinal",
+ "isosurface",
+ "isoutlier",
+ "ispc",
+ "isplaying",
+ "ispref",
+ "isprime",
+ "isprop",
+ "isprotected",
+ "isreal",
+ "isrecording",
+ "isregular",
+ "isrow",
+ "isscalar",
+ "issimplified",
+ "issorted",
+ "issortedrows",
+ "isspace",
+ "issparse",
+ "isstring",
+ "isstrprop",
+ "isstruct",
+ "isstudent",
+ "issymmetric",
+ "istable",
+ "istall",
+ "istimetable",
+ "istril",
+ "istriu",
+ "isundefined",
+ "isunix",
+ "isvalid",
+ "isvarname",
+ "isvector",
+ "isweekend",
+ "j",
+ "javaArray",
+ "javaMethod",
+ "javaMethodEDT",
+ "javaObject",
+ "javaObjectEDT",
+ "javaaddpath",
+ "javachk",
+ "javaclasspath",
+ "javarmpath",
+ "jet",
+ "join",
+ "jsondecode",
+ "jsonencode",
+ "juliandate",
+ "keyboard",
+ "keys",
+ "kron",
+ "labeledge",
+ "labelnode",
+ "lag",
+ "laplacian",
+ "lastwarn",
+ "layout",
+ "lcm",
+ "ldl",
+ "leapseconds",
+ "legend",
+ "legendre",
+ "length",
+ "letterBoundary",
+ "lettersPattern",
+ "lib.pointer",
+ "libfunctions",
+ "libfunctionsview",
+ "libisloaded",
+ "libpointer",
+ "libstruct",
+ "license",
+ "light",
+ "lightangle",
+ "lighting",
+ "lin2mu",
+ "line",
+ "lineBoundary",
+ "lines",
+ "linkaxes",
+ "linkdata",
+ "linkprop",
+ "linsolve",
+ "linspace",
+ "listModifiedFiles",
+ "listRequiredFiles",
+ "listdlg",
+ "listener",
+ "listfonts",
+ "load",
+ "loadObjectImpl",
+ "loadlibrary",
+ "loadobj",
+ "localfunctions",
+ "log",
+ "log10",
+ "log1p",
+ "log2",
+ "logical",
+ "loglog",
+ "logm",
+ "logspace",
+ "lookAheadBoundary",
+ "lookBehindBoundary",
+ "lookfor",
+ "lower",
+ "ls",
+ "lscov",
+ "lsqminnorm",
+ "lsqnonneg",
+ "lsqr",
+ "lu",
+ "magic",
+ "makehgtform",
+ "makima",
+ "mapreduce",
+ "mapreducer",
+ "maskedPattern",
+ "mat2cell",
+ "mat2str",
+ "matches",
+ "matchpairs",
+ "material",
+ "matfile",
+ "matlab.System",
+ "matlab.addons.disableAddon",
+ "matlab.addons.enableAddon",
+ "matlab.addons.install",
+ "matlab.addons.installedAddons",
+ "matlab.addons.isAddonEnabled",
+ "matlab.addons.toolbox.installToolbox",
+ "matlab.addons.toolbox.installedToolboxes",
+ "matlab.addons.toolbox.packageToolbox",
+ "matlab.addons.toolbox.toolboxVersion",
+ "matlab.addons.toolbox.uninstallToolbox",
+ "matlab.addons.uninstall",
+ "matlab.apputil.create",
+ "matlab.apputil.getInstalledAppInfo",
+ "matlab.apputil.install",
+ "matlab.apputil.package",
+ "matlab.apputil.run",
+ "matlab.apputil.uninstall",
+ "matlab.codetools.requiredFilesAndProducts",
+ "matlab.engine.FutureResult",
+ "matlab.engine.MatlabEngine",
+ "matlab.engine.connect_matlab",
+ "matlab.engine.engineName",
+ "matlab.engine.find_matlab",
+ "matlab.engine.isEngineShared",
+ "matlab.engine.shareEngine",
+ "matlab.engine.start_matlab",
+ "matlab.exception.JavaException",
+ "matlab.exception.PyException",
+ "matlab.graphics.chartcontainer.ChartContainer",
+ "matlab.graphics.chartcontainer.mixin.Colorbar",
+ "matlab.graphics.chartcontainer.mixin.Legend",
+ "matlab.io.Datastore",
+ "matlab.io.datastore.BlockedFileSet",
+ "matlab.io.datastore.DsFileReader",
+ "matlab.io.datastore.DsFileSet",
+ "matlab.io.datastore.FileSet",
+ "matlab.io.datastore.FileWritable",
+ "matlab.io.datastore.FoldersPropertyProvider",
+ "matlab.io.datastore.HadoopLocationBased",
+ "matlab.io.datastore.Partitionable",
+ "matlab.io.datastore.Shuffleable",
+ "matlab.io.hdf4.sd",
+ "matlab.io.hdfeos.gd",
+ "matlab.io.hdfeos.sw",
+ "matlab.io.saveVariablesToScript",
+ "matlab.lang.OnOffSwitchState",
+ "matlab.lang.correction.AppendArgumentsCorrection",
+ "matlab.lang.correction.ConvertToFunctionNotationCorrection",
+ "matlab.lang.correction.ReplaceIdentifierCorrection",
+ "matlab.lang.makeUniqueStrings",
+ "matlab.lang.makeValidName",
+ "matlab.mex.MexHost",
+ "matlab.mixin.Copyable",
+ "matlab.mixin.CustomDisplay",
+ "matlab.mixin.Heterogeneous",
+ "matlab.mixin.SetGet",
+ "matlab.mixin.SetGetExactNames",
+ "matlab.mixin.util.PropertyGroup",
+ "matlab.mock.AnyArguments",
+ "matlab.mock.InteractionHistory",
+ "matlab.mock.InteractionHistory.forMock",
+ "matlab.mock.MethodCallBehavior",
+ "matlab.mock.PropertyBehavior",
+ "matlab.mock.PropertyGetBehavior",
+ "matlab.mock.PropertySetBehavior",
+ "matlab.mock.TestCase",
+ "matlab.mock.actions.AssignOutputs",
+ "matlab.mock.actions.DoNothing",
+ "matlab.mock.actions.Invoke",
+ "matlab.mock.actions.ReturnStoredValue",
+ "matlab.mock.actions.StoreValue",
+ "matlab.mock.actions.ThrowException",
+ "matlab.mock.constraints.Occurred",
+ "matlab.mock.constraints.WasAccessed",
+ "matlab.mock.constraints.WasCalled",
+ "matlab.mock.constraints.WasSet",
+ "matlab.net.ArrayFormat",
+ "matlab.net.QueryParameter",
+ "matlab.net.URI",
+ "matlab.net.base64decode",
+ "matlab.net.base64encode",
+ "matlab.net.http.AuthInfo",
+ "matlab.net.http.AuthenticationScheme",
+ "matlab.net.http.Cookie",
+ "matlab.net.http.CookieInfo",
+ "matlab.net.http.Credentials",
+ "matlab.net.http.Disposition",
+ "matlab.net.http.HTTPException",
+ "matlab.net.http.HTTPOptions",
+ "matlab.net.http.HeaderField",
+ "matlab.net.http.LogRecord",
+ "matlab.net.http.MediaType",
+ "matlab.net.http.Message",
+ "matlab.net.http.MessageBody",
+ "matlab.net.http.MessageType",
+ "matlab.net.http.ProgressMonitor",
+ "matlab.net.http.ProtocolVersion",
+ "matlab.net.http.RequestLine",
+ "matlab.net.http.RequestMessage",
+ "matlab.net.http.RequestMethod",
+ "matlab.net.http.ResponseMessage",
+ "matlab.net.http.StartLine",
+ "matlab.net.http.StatusClass",
+ "matlab.net.http.StatusCode",
+ "matlab.net.http.StatusLine",
+ "matlab.net.http.field.AcceptField",
+ "matlab.net.http.field.AuthenticateField",
+ "matlab.net.http.field.AuthenticationInfoField",
+ "matlab.net.http.field.AuthorizationField",
+ "matlab.net.http.field.ContentDispositionField",
+ "matlab.net.http.field.ContentLengthField",
+ "matlab.net.http.field.ContentLocationField",
+ "matlab.net.http.field.ContentTypeField",
+ "matlab.net.http.field.CookieField",
+ "matlab.net.http.field.DateField",
+ "matlab.net.http.field.GenericField",
+ "matlab.net.http.field.GenericParameterizedField",
+ "matlab.net.http.field.HTTPDateField",
+ "matlab.net.http.field.IntegerField",
+ "matlab.net.http.field.LocationField",
+ "matlab.net.http.field.MediaRangeField",
+ "matlab.net.http.field.SetCookieField",
+ "matlab.net.http.field.URIReferenceField",
+ "matlab.net.http.io.BinaryConsumer",
+ "matlab.net.http.io.ContentConsumer",
+ "matlab.net.http.io.ContentProvider",
+ "matlab.net.http.io.FileConsumer",
+ "matlab.net.http.io.FileProvider",
+ "matlab.net.http.io.FormProvider",
+ "matlab.net.http.io.GenericConsumer",
+ "matlab.net.http.io.GenericProvider",
+ "matlab.net.http.io.ImageConsumer",
+ "matlab.net.http.io.ImageProvider",
+ "matlab.net.http.io.JSONConsumer",
+ "matlab.net.http.io.JSONProvider",
+ "matlab.net.http.io.MultipartConsumer",
+ "matlab.net.http.io.MultipartFormProvider",
+ "matlab.net.http.io.MultipartProvider",
+ "matlab.net.http.io.StringConsumer",
+ "matlab.net.http.io.StringProvider",
+ "matlab.perftest.FixedTimeExperiment",
+ "matlab.perftest.FrequentistTimeExperiment",
+ "matlab.perftest.TestCase",
+ "matlab.perftest.TimeExperiment",
+ "matlab.perftest.TimeResult",
+ "matlab.project.Project",
+ "matlab.project.convertDefinitionFiles",
+ "matlab.project.createProject",
+ "matlab.project.deleteProject",
+ "matlab.project.loadProject",
+ "matlab.project.rootProject",
+ "matlab.settings.FactoryGroup.createToolboxGroup",
+ "matlab.settings.SettingsFileUpgrader",
+ "matlab.settings.loadSettingsCompatibilityResults",
+ "matlab.settings.mustBeIntegerScalar",
+ "matlab.settings.mustBeLogicalScalar",
+ "matlab.settings.mustBeNumericScalar",
+ "matlab.settings.mustBeStringScalar",
+ "matlab.settings.reloadFactoryFile",
+ "matlab.system.mixin.FiniteSource",
+ "matlab.tall.blockMovingWindow",
+ "matlab.tall.movingWindow",
+ "matlab.tall.reduce",
+ "matlab.tall.transform",
+ "matlab.test.behavior.Missing",
+ "matlab.ui.componentcontainer.ComponentContainer",
+ "matlab.uitest.TestCase",
+ "matlab.uitest.TestCase.forInteractiveUse",
+ "matlab.uitest.unlock",
+ "matlab.unittest.Test",
+ "matlab.unittest.TestCase",
+ "matlab.unittest.TestResult",
+ "matlab.unittest.TestRunner",
+ "matlab.unittest.TestSuite",
+ "matlab.unittest.constraints.BooleanConstraint",
+ "matlab.unittest.constraints.Constraint",
+ "matlab.unittest.constraints.Tolerance",
+ "matlab.unittest.diagnostics.ConstraintDiagnostic",
+ "matlab.unittest.diagnostics.Diagnostic",
+ "matlab.unittest.fixtures.Fixture",
+ "matlab.unittest.measurement.DefaultMeasurementResult",
+ "matlab.unittest.measurement.MeasurementResult",
+ "matlab.unittest.measurement.chart.ComparisonPlot",
+ "matlab.unittest.plugins.OutputStream",
+ "matlab.unittest.plugins.Parallelizable",
+ "matlab.unittest.plugins.QualifyingPlugin",
+ "matlab.unittest.plugins.TestRunnerPlugin",
+ "matlab.wsdl.createWSDLClient",
+ "matlab.wsdl.setWSDLToolPath",
+ "matlabRelease",
+ "matlabrc",
+ "matlabroot",
+ "max",
+ "maxflow",
+ "maxk",
+ "mean",
+ "median",
+ "memmapfile",
+ "memoize",
+ "memory",
+ "mergecats",
+ "mergevars",
+ "mesh",
+ "meshc",
+ "meshgrid",
+ "meshz",
+ "meta.ArrayDimension",
+ "meta.DynamicProperty",
+ "meta.EnumeratedValue",
+ "meta.FixedDimension",
+ "meta.MetaData",
+ "meta.UnrestrictedDimension",
+ "meta.Validation",
+ "meta.abstractDetails",
+ "meta.class",
+ "meta.class.fromName",
+ "meta.event",
+ "meta.method",
+ "meta.package",
+ "meta.package.fromName",
+ "meta.package.getAllPackages",
+ "meta.property",
+ "metaclass",
+ "methods",
+ "methodsview",
+ "mex",
+ "mexext",
+ "mexhost",
+ "mfilename",
+ "mget",
+ "milliseconds",
+ "min",
+ "mink",
+ "minres",
+ "minspantree",
+ "minute",
+ "minutes",
+ "mislocked",
+ "missing",
+ "mkdir",
+ "mkpp",
+ "mldivide",
+ "mlintrpt",
+ "mlock",
+ "mmfileinfo",
+ "mod",
+ "mode",
+ "month",
+ "more",
+ "morebins",
+ "movAbsHDU",
+ "movNamHDU",
+ "movRelHDU",
+ "move",
+ "movefile",
+ "movegui",
+ "movevars",
+ "movie",
+ "movmad",
+ "movmax",
+ "movmean",
+ "movmedian",
+ "movmin",
+ "movprod",
+ "movstd",
+ "movsum",
+ "movvar",
+ "mpower",
+ "mput",
+ "mrdivide",
+ "msgbox",
+ "mtimes",
+ "mu2lin",
+ "multibandread",
+ "multibandwrite",
+ "munlock",
+ "mustBeA",
+ "mustBeFile",
+ "mustBeFinite",
+ "mustBeFloat",
+ "mustBeFolder",
+ "mustBeGreaterThan",
+ "mustBeGreaterThanOrEqual",
+ "mustBeInRange",
+ "mustBeInteger",
+ "mustBeLessThan",
+ "mustBeLessThanOrEqual",
+ "mustBeMember",
+ "mustBeNegative",
+ "mustBeNonNan",
+ "mustBeNonempty",
+ "mustBeNonmissing",
+ "mustBeNonnegative",
+ "mustBeNonpositive",
+ "mustBeNonsparse",
+ "mustBeNonzero",
+ "mustBeNonzeroLengthText",
+ "mustBeNumeric",
+ "mustBeNumericOrLogical",
+ "mustBePositive",
+ "mustBeReal",
+ "mustBeScalarOrEmpty",
+ "mustBeText",
+ "mustBeTextScalar",
+ "mustBeUnderlyingType",
+ "mustBeValidVariableName",
+ "mustBeVector",
+ "namedPattern",
+ "namedargs2cell",
+ "namelengthmax",
+ "nargin",
+ "narginchk",
+ "nargout",
+ "nargoutchk",
+ "native2unicode",
+ "nccreate",
+ "ncdisp",
+ "nchoosek",
+ "ncinfo",
+ "ncread",
+ "ncreadatt",
+ "ncwrite",
+ "ncwriteatt",
+ "ncwriteschema",
+ "ndgrid",
+ "ndims",
+ "nearest",
+ "nearestNeighbor",
+ "nearestvertex",
+ "neighbors",
+ "netcdf.abort",
+ "netcdf.close",
+ "netcdf.copyAtt",
+ "netcdf.create",
+ "netcdf.defDim",
+ "netcdf.defGrp",
+ "netcdf.defVar",
+ "netcdf.defVarChunking",
+ "netcdf.defVarDeflate",
+ "netcdf.defVarFill",
+ "netcdf.defVarFletcher32",
+ "netcdf.delAtt",
+ "netcdf.endDef",
+ "netcdf.getAtt",
+ "netcdf.getChunkCache",
+ "netcdf.getConstant",
+ "netcdf.getConstantNames",
+ "netcdf.getVar",
+ "netcdf.inq",
+ "netcdf.inqAtt",
+ "netcdf.inqAttID",
+ "netcdf.inqAttName",
+ "netcdf.inqDim",
+ "netcdf.inqDimID",
+ "netcdf.inqDimIDs",
+ "netcdf.inqFormat",
+ "netcdf.inqGrpName",
+ "netcdf.inqGrpNameFull",
+ "netcdf.inqGrpParent",
+ "netcdf.inqGrps",
+ "netcdf.inqLibVers",
+ "netcdf.inqNcid",
+ "netcdf.inqUnlimDims",
+ "netcdf.inqVar",
+ "netcdf.inqVarChunking",
+ "netcdf.inqVarDeflate",
+ "netcdf.inqVarFill",
+ "netcdf.inqVarFletcher32",
+ "netcdf.inqVarID",
+ "netcdf.inqVarIDs",
+ "netcdf.open",
+ "netcdf.putAtt",
+ "netcdf.putVar",
+ "netcdf.reDef",
+ "netcdf.renameAtt",
+ "netcdf.renameDim",
+ "netcdf.renameVar",
+ "netcdf.setChunkCache",
+ "netcdf.setDefaultFormat",
+ "netcdf.setFill",
+ "netcdf.sync",
+ "newline",
+ "newplot",
+ "nextpow2",
+ "nexttile",
+ "nnz",
+ "nonzeros",
+ "norm",
+ "normalize",
+ "normest",
+ "notify",
+ "now",
+ "nsidedpoly",
+ "nthroot",
+ "nufft",
+ "nufftn",
+ "null",
+ "num2cell",
+ "num2hex",
+ "num2ruler",
+ "num2str",
+ "numArgumentsFromSubscript",
+ "numRegions",
+ "numboundaries",
+ "numedges",
+ "numel",
+ "numnodes",
+ "numpartitions",
+ "numsides",
+ "nzmax",
+ "ode113",
+ "ode15i",
+ "ode15s",
+ "ode23",
+ "ode23s",
+ "ode23t",
+ "ode23tb",
+ "ode45",
+ "odeget",
+ "odeset",
+ "odextend",
+ "onCleanup",
+ "ones",
+ "open",
+ "openDiskFile",
+ "openFile",
+ "openProject",
+ "openfig",
+ "opengl",
+ "openvar",
+ "optimget",
+ "optimset",
+ "optionalPattern",
+ "ordeig",
+ "orderfields",
+ "ordqz",
+ "ordschur",
+ "orient",
+ "orth",
+ "outdegree",
+ "outedges",
+ "outerjoin",
+ "overlaps",
+ "overlapsrange",
+ "pack",
+ "pad",
+ "padecoef",
+ "pagectranspose",
+ "pagemtimes",
+ "pagetranspose",
+ "pan",
+ "panInteraction",
+ "parallelplot",
+ "pareto",
+ "parquetDatastore",
+ "parquetinfo",
+ "parquetread",
+ "parquetwrite",
+ "partition",
+ "parula",
+ "pascal",
+ "patch",
+ "path",
+ "pathsep",
+ "pathtool",
+ "pattern",
+ "pause",
+ "pbaspect",
+ "pcg",
+ "pchip",
+ "pcode",
+ "pcolor",
+ "pdepe",
+ "pdeval",
+ "peaks",
+ "perimeter",
+ "perl",
+ "perms",
+ "permute",
+ "pi",
+ "pie",
+ "pie3",
+ "pink",
+ "pinv",
+ "planerot",
+ "play",
+ "playblocking",
+ "plot",
+ "plot3",
+ "plotbrowser",
+ "plotedit",
+ "plotmatrix",
+ "plottools",
+ "plus",
+ "pointLocation",
+ "pol2cart",
+ "polaraxes",
+ "polarbubblechart",
+ "polarhistogram",
+ "polarplot",
+ "polarscatter",
+ "poly",
+ "polyarea",
+ "polybuffer",
+ "polyder",
+ "polyeig",
+ "polyfit",
+ "polyint",
+ "polyshape",
+ "polyval",
+ "polyvalm",
+ "posixtime",
+ "possessivePattern",
+ "pow2",
+ "ppval",
+ "predecessors",
+ "prefdir",
+ "preferences",
+ "press",
+ "preview",
+ "primes",
+ "print",
+ "printdlg",
+ "printopt",
+ "printpreview",
+ "prism",
+ "processInputSpecificationChangeImpl",
+ "processTunedPropertiesImpl",
+ "prod",
+ "profile",
+ "propedit",
+ "properties",
+ "propertyeditor",
+ "psi",
+ "publish",
+ "pwd",
+ "pyargs",
+ "pyenv",
+ "qmr",
+ "qr",
+ "qrdelete",
+ "qrinsert",
+ "qrupdate",
+ "quad2d",
+ "quadgk",
+ "quarter",
+ "questdlg",
+ "quit",
+ "quiver",
+ "quiver3",
+ "qz",
+ "rad2deg",
+ "rand",
+ "randi",
+ "randn",
+ "randperm",
+ "rank",
+ "rat",
+ "rats",
+ "rbbox",
+ "rcond",
+ "read",
+ "readATblHdr",
+ "readBTblHdr",
+ "readCard",
+ "readCol",
+ "readFrame",
+ "readImg",
+ "readKey",
+ "readKeyCmplx",
+ "readKeyDbl",
+ "readKeyLongLong",
+ "readKeyLongStr",
+ "readKeyUnit",
+ "readRecord",
+ "readall",
+ "readcell",
+ "readline",
+ "readlines",
+ "readmatrix",
+ "readstruct",
+ "readtable",
+ "readtimetable",
+ "readvars",
+ "real",
+ "reallog",
+ "realmax",
+ "realmin",
+ "realpow",
+ "realsqrt",
+ "record",
+ "recordblocking",
+ "rectangle",
+ "rectint",
+ "recycle",
+ "reducepatch",
+ "reducevolume",
+ "refresh",
+ "refreshSourceControl",
+ "refreshdata",
+ "regexp",
+ "regexpPattern",
+ "regexpi",
+ "regexprep",
+ "regexptranslate",
+ "regionZoomInteraction",
+ "regions",
+ "registerevent",
+ "regmatlabserver",
+ "rehash",
+ "relationaloperators",
+ "release",
+ "releaseImpl",
+ "reload",
+ "rem",
+ "remove",
+ "removeCategory",
+ "removeFile",
+ "removeGroup",
+ "removeLabel",
+ "removePath",
+ "removeReference",
+ "removeSetting",
+ "removeShortcut",
+ "removeShutdownFile",
+ "removeStartupFile",
+ "removeStyle",
+ "removeToolbarExplorationButtons",
+ "removecats",
+ "removets",
+ "removevars",
+ "rename",
+ "renamecats",
+ "renamevars",
+ "rendererinfo",
+ "reordercats",
+ "reordernodes",
+ "repelem",
+ "replace",
+ "replaceBetween",
+ "repmat",
+ "resample",
+ "rescale",
+ "reset",
+ "resetImpl",
+ "reshape",
+ "residue",
+ "restoredefaultpath",
+ "resume",
+ "rethrow",
+ "retime",
+ "reverse",
+ "rgb2gray",
+ "rgb2hsv",
+ "rgb2ind",
+ "rgbplot",
+ "ribbon",
+ "rlim",
+ "rmappdata",
+ "rmboundary",
+ "rmdir",
+ "rmedge",
+ "rmfield",
+ "rmholes",
+ "rmmissing",
+ "rmnode",
+ "rmoutliers",
+ "rmpath",
+ "rmpref",
+ "rmprop",
+ "rmslivers",
+ "rng",
+ "roots",
+ "rosser",
+ "rot90",
+ "rotate",
+ "rotate3d",
+ "rotateInteraction",
+ "round",
+ "rowfun",
+ "rows2vars",
+ "rref",
+ "rsf2csf",
+ "rtickangle",
+ "rtickformat",
+ "rticklabels",
+ "rticks",
+ "ruler2num",
+ "rulerPanInteraction",
+ "run",
+ "runChecks",
+ "runperf",
+ "runtests",
+ "save",
+ "saveObjectImpl",
+ "saveas",
+ "savefig",
+ "saveobj",
+ "savepath",
+ "scale",
+ "scatter",
+ "scatter3",
+ "scatteredInterpolant",
+ "scatterhistogram",
+ "schur",
+ "scroll",
+ "sec",
+ "secd",
+ "sech",
+ "second",
+ "seconds",
+ "semilogx",
+ "semilogy",
+ "sendmail",
+ "serialport",
+ "serialportlist",
+ "set",
+ "setBscale",
+ "setCompressionType",
+ "setDTR",
+ "setHCompScale",
+ "setHCompSmooth",
+ "setProperties",
+ "setRTS",
+ "setTileDim",
+ "setTscale",
+ "setabstime",
+ "setappdata",
+ "setcats",
+ "setdiff",
+ "setenv",
+ "setfield",
+ "setinterpmethod",
+ "setpixelposition",
+ "setpref",
+ "settimeseriesnames",
+ "settings",
+ "setuniformtime",
+ "setup",
+ "setupImpl",
+ "setvaropts",
+ "setvartype",
+ "setxor",
+ "sgtitle",
+ "shading",
+ "sheetnames",
+ "shg",
+ "shiftdim",
+ "shortestpath",
+ "shortestpathtree",
+ "showplottool",
+ "shrinkfaces",
+ "shuffle",
+ "sign",
+ "simplify",
+ "sin",
+ "sind",
+ "single",
+ "sinh",
+ "sinpi",
+ "size",
+ "slice",
+ "smooth3",
+ "smoothdata",
+ "snapnow",
+ "sort",
+ "sortboundaries",
+ "sortregions",
+ "sortrows",
+ "sortx",
+ "sorty",
+ "sound",
+ "soundsc",
+ "spalloc",
+ "sparse",
+ "spaugment",
+ "spconvert",
+ "spdiags",
+ "specular",
+ "speye",
+ "spfun",
+ "sph2cart",
+ "sphere",
+ "spinmap",
+ "spline",
+ "split",
+ "splitapply",
+ "splitlines",
+ "splitvars",
+ "spones",
+ "spparms",
+ "sprand",
+ "sprandn",
+ "sprandsym",
+ "sprank",
+ "spreadsheetDatastore",
+ "spreadsheetImportOptions",
+ "spring",
+ "sprintf",
+ "spy",
+ "sqrt",
+ "sqrtm",
+ "squeeze",
+ "ss2tf",
+ "sscanf",
+ "stack",
+ "stackedplot",
+ "stairs",
+ "standardizeMissing",
+ "start",
+ "startat",
+ "startsWith",
+ "startup",
+ "std",
+ "stem",
+ "stem3",
+ "step",
+ "stepImpl",
+ "stlread",
+ "stlwrite",
+ "stop",
+ "str2double",
+ "str2func",
+ "str2num",
+ "strcat",
+ "strcmp",
+ "strcmpi",
+ "stream2",
+ "stream3",
+ "streamline",
+ "streamparticles",
+ "streamribbon",
+ "streamslice",
+ "streamtube",
+ "strfind",
+ "string",
+ "strings",
+ "strip",
+ "strjoin",
+ "strjust",
+ "strlength",
+ "strncmp",
+ "strncmpi",
+ "strrep",
+ "strsplit",
+ "strtok",
+ "strtrim",
+ "struct",
+ "struct2cell",
+ "struct2table",
+ "structfun",
+ "sub2ind",
+ "subgraph",
+ "subplot",
+ "subsasgn",
+ "subscribe",
+ "subsindex",
+ "subspace",
+ "subsref",
+ "substruct",
+ "subtitle",
+ "subtract",
+ "subvolume",
+ "successors",
+ "sum",
+ "summary",
+ "summer",
+ "superclasses",
+ "surf",
+ "surf2patch",
+ "surface",
+ "surfaceArea",
+ "surfc",
+ "surfl",
+ "surfnorm",
+ "svd",
+ "svds",
+ "svdsketch",
+ "swapbytes",
+ "swarmchart",
+ "swarmchart3",
+ "sylvester",
+ "symamd",
+ "symbfact",
+ "symmlq",
+ "symrcm",
+ "synchronize",
+ "sysobjupdate",
+ "system",
+ "table",
+ "table2array",
+ "table2cell",
+ "table2struct",
+ "table2timetable",
+ "tabularTextDatastore",
+ "tail",
+ "tall",
+ "tallrng",
+ "tan",
+ "tand",
+ "tanh",
+ "tar",
+ "tcpclient",
+ "tempdir",
+ "tempname",
+ "testsuite",
+ "tetramesh",
+ "texlabel",
+ "text",
+ "textBoundary",
+ "textscan",
+ "textwrap",
+ "tfqmr",
+ "thetalim",
+ "thetatickformat",
+ "thetaticklabels",
+ "thetaticks",
+ "thingSpeakRead",
+ "thingSpeakWrite",
+ "throw",
+ "throwAsCaller",
+ "tic",
+ "tiledlayout",
+ "time",
+ "timeit",
+ "timeofday",
+ "timer",
+ "timerange",
+ "timerfind",
+ "timerfindall",
+ "timeseries",
+ "timetable",
+ "timetable2table",
+ "timezones",
+ "title",
+ "toc",
+ "todatenum",
+ "toeplitz",
+ "toolboxdir",
+ "topkrows",
+ "toposort",
+ "trace",
+ "transclosure",
+ "transform",
+ "translate",
+ "transpose",
+ "transreduction",
+ "trapz",
+ "treelayout",
+ "treeplot",
+ "triangulation",
+ "tril",
+ "trimesh",
+ "triplot",
+ "trisurf",
+ "triu",
+ "true",
+ "tscollection",
+ "tsdata.event",
+ "tsearchn",
+ "turbo",
+ "turningdist",
+ "type",
+ "typecast",
+ "tzoffset",
+ "uialert",
+ "uiaxes",
+ "uibutton",
+ "uibuttongroup",
+ "uicheckbox",
+ "uiconfirm",
+ "uicontextmenu",
+ "uicontrol",
+ "uidatepicker",
+ "uidropdown",
+ "uieditfield",
+ "uifigure",
+ "uigauge",
+ "uigetdir",
+ "uigetfile",
+ "uigetpref",
+ "uigridlayout",
+ "uihtml",
+ "uiimage",
+ "uiknob",
+ "uilabel",
+ "uilamp",
+ "uilistbox",
+ "uimenu",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uint8",
+ "uiopen",
+ "uipanel",
+ "uiprogressdlg",
+ "uipushtool",
+ "uiputfile",
+ "uiradiobutton",
+ "uiresume",
+ "uisave",
+ "uisetcolor",
+ "uisetfont",
+ "uisetpref",
+ "uislider",
+ "uispinner",
+ "uistack",
+ "uistyle",
+ "uiswitch",
+ "uitab",
+ "uitabgroup",
+ "uitable",
+ "uitextarea",
+ "uitogglebutton",
+ "uitoggletool",
+ "uitoolbar",
+ "uitree",
+ "uitreenode",
+ "uiwait",
+ "uminus",
+ "underlyingType",
+ "underlyingValue",
+ "unicode2native",
+ "union",
+ "unique",
+ "uniquetol",
+ "unix",
+ "unloadlibrary",
+ "unmesh",
+ "unmkpp",
+ "unregisterallevents",
+ "unregisterevent",
+ "unstack",
+ "unsubscribe",
+ "untar",
+ "unwrap",
+ "unzip",
+ "update",
+ "updateDependencies",
+ "uplus",
+ "upper",
+ "usejava",
+ "userpath",
+ "validateFunctionSignaturesJSON",
+ "validateInputsImpl",
+ "validatePropertiesImpl",
+ "validateattributes",
+ "validatecolor",
+ "validatestring",
+ "values",
+ "vander",
+ "var",
+ "varargin",
+ "varargout",
+ "varfun",
+ "vartype",
+ "vecnorm",
+ "ver",
+ "verLessThan",
+ "version",
+ "vertcat",
+ "vertexAttachments",
+ "vertexNormal",
+ "view",
+ "viewmtx",
+ "visdiff",
+ "volume",
+ "volumebounds",
+ "voronoi",
+ "voronoiDiagram",
+ "voronoin",
+ "wait",
+ "waitbar",
+ "waitfor",
+ "waitforbuttonpress",
+ "warndlg",
+ "warning",
+ "waterfall",
+ "web",
+ "weboptions",
+ "webread",
+ "websave",
+ "webwrite",
+ "week",
+ "weekday",
+ "what",
+ "which",
+ "whitespaceBoundary",
+ "whitespacePattern",
+ "who",
+ "whos",
+ "width",
+ "wildcardPattern",
+ "wilkinson",
+ "winopen",
+ "winqueryreg",
+ "winter",
+ "withinrange",
+ "withtol",
+ "wordcloud",
+ "write",
+ "writeChecksum",
+ "writeCol",
+ "writeComment",
+ "writeDate",
+ "writeHistory",
+ "writeImg",
+ "writeKey",
+ "writeKeyUnit",
+ "writeVideo",
+ "writeall",
+ "writecell",
+ "writeline",
+ "writematrix",
+ "writestruct",
+ "writetable",
+ "writetimetable",
+ "xcorr",
+ "xcov",
+ "xlabel",
+ "xlim",
+ "xline",
+ "xmlread",
+ "xmlwrite",
+ "xor",
+ "xslt",
+ "xtickangle",
+ "xtickformat",
+ "xticklabels",
+ "xticks",
+ "year",
+ "years",
+ "ylabel",
+ "ylim",
+ "yline",
+ "ymd",
+ "ytickangle",
+ "ytickformat",
+ "yticklabels",
+ "yticks",
+ "yyaxis",
+ "yyyymmdd",
+ "zeros",
+ "zip",
+ "zlabel",
+ "zlim",
+ "zoom",
+ "zoomInteraction",
+ "ztickangle",
+ "ztickformat",
+ "zticklabels",
+ "zticks",
+ ],
+ prefix=r"(?<!\.)(", # Exclude field names
+ suffix=r")\b"
+ ),
+ Name.Builtin
+ ),
+
+ # line continuation with following comment:
+ (r'(\.\.\.)(.*)$', bygroups(Keyword, Comment)),
+
+ # command form:
+ # "How MATLAB Recognizes Command Syntax" specifies that an operator
+ # is recognized if it is either surrounded by spaces or by no
+ # spaces on both sides (this allows distinguishing `cd ./foo` from
+ # `cd ./ foo`.). Here, the regex checks that the first word in the
+ # line is not followed by <spaces> and then
+ # (equal | open-parenthesis | <operator><space> | <space>).
+ (r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|%s\s|\s)' % _operators,
+ bygroups(Whitespace, Name, Whitespace), 'commandargs'),
+
+ include('expressions')
+ ],
+ 'blockcomment': [
+ (r'^\s*%\}', Comment.Multiline, '#pop'),
+ (r'^.*\n', Comment.Multiline),
+ (r'.', Comment.Multiline),
+ ],
+ 'deffunc': [
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ bygroups(Whitespace, Text, Whitespace, Punctuation,
+ Whitespace, Name.Function, Punctuation, Text,
+ Punctuation, Whitespace), '#pop'),
+ # function with no args
+ (r'(\s*)([a-zA-Z_]\w*)',
+ bygroups(Whitespace, Name.Function), '#pop'),
+ ],
+ 'propattrs': [
+ (r'(\w+)(\s*)(=)(\s*)(\d+)',
+ bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace,
+ Number)),
+ (r'(\w+)(\s*)(=)(\s*)([a-zA-Z]\w*)',
+ bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace,
+ Keyword)),
+ (r',', Punctuation),
+ (r'\)', Punctuation, '#pop'),
+ (r'\s+', Whitespace),
+ (r'.', Text),
+ ],
+ 'defprops': [
+ (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
+ (r'%.*$', Comment),
+ (r'(?<!\.)end\b', Keyword, '#pop'),
+ include('expressions'),
+ ],
+ 'string': [
+ (r"[^']*'", String, '#pop'),
+ ],
+ 'commandargs': [
+ # If an equal sign or other operator is encountered, this
+ # isn't a command. It might be a variable assignment or
+ # comparison operation with multiple spaces before the
+ # equal sign or operator
+ (r"=", Punctuation, '#pop'),
+ (_operators, Operator, '#pop'),
+ (r"[ \t]+", Whitespace),
+ ("'[^']*'", String),
+ (r"[^';\s]+", String),
+ (";", Punctuation, '#pop'),
+ default('#pop'),
+ ]
+ }
+
+ def analyse_text(text):
+ # function declaration.
+ first_non_comment = next((line for line in text.splitlines()
+ if not re.match(r'^\s*%', text)), '').strip()
+ if (first_non_comment.startswith('function')
+ and '{' not in first_non_comment):
+ return 1.
+ # comment
+ elif re.search(r'^\s*%', text, re.M):
+ return 0.2
+ # system cmd
+ elif re.search(r'^!\w+', text, re.M):
+ return 0.2
+
+
# Matches a single line including its trailing newline; used by the
# session lexer below to walk input line by line.
line_re = re.compile('.*?\n')
+
+
class MatlabSessionLexer(Lexer):
    """
    For Matlab sessions. Modeled after PythonConsoleLexer.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    .. versionadded:: 0.10
    """
    name = 'Matlab session'
    aliases = ['matlabsession']

    def get_tokens_unprocessed(self, text):
        # Delegate actual code highlighting to the MATLAB lexer; this method
        # only strips prompts ('>> '), marks tracebacks ('???') and passes
        # plain lines through as program output.
        mlexer = MatlabLexer(**self.options)

        curcode = ''        # accumulated code to hand to MatlabLexer
        insertions = []     # (position, [(0, tokentype, text)]) prompt tokens
        continuation = False  # previous line ended with '...'

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('>> '):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:3])]))
                curcode += line[3:]

            elif line.startswith('>>'):
                # prompt without trailing space
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:2])]))
                curcode += line[2:]

            elif line.startswith('???'):

                idx = len(curcode)

                # without is showing error on same line as before...?
                # line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append((idx, [token]))
            elif continuation and insertions:
                # line_start is the length of the most recent prompt symbol
                line_start = len(insertions[-1][-1][-1])
                # Set leading spaces with the length of the prompt to be a generic prompt
                # This keeps code aligned when prompts are removed, say with some Javascript
                if line.startswith(' '*line_start):
                    insertions.append(
                        (len(curcode), [(0, Generic.Prompt, line[:line_start])]))
                    curcode += line[line_start:]
                else:
                    curcode += line
            else:
                # Not a prompt line: flush any pending code first, then emit
                # this line verbatim as session output.
                if curcode:
                    yield from do_insertions(
                        insertions, mlexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []

                yield match.start(), Generic.Output, line

            # Does not allow continuation if a comment is included after the ellipses.
            # Continues any line that ends with ..., even comments (lines that start with %)
            if line.strip().endswith('...'):
                continuation = True
            else:
                continuation = False

        # Flush any code left over at end of input.
        if curcode:  # or item:
            yield from do_insertions(
                insertions, mlexer.get_tokens_unprocessed(curcode))
+
+
class OctaveLexer(RegexLexer):
    """
    For GNU Octave source code.

    .. versionadded:: 1.5
    """
    name = 'Octave'
    url = 'https://www.gnu.org/software/octave/index'
    aliases = ['octave']
    filenames = ['*.m']
    mimetypes = ['text/octave']

    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # First dump all of the Octave manual into a plain text file:
    #
    # $ info octave --subnodes -o octave-manual
    #
    # Now grep through it:

    # for i in \
    #     "Built-in Function" "Command" "Function File" \
    #     "Loadable Function" "Mapping Function";
    # do
    #     perl -e '@name = qw('"$i"');
    #              print lc($name[0]),"_kw = [\n"';
    #
    #     perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
    #         octave-manual | sort | uniq ;
    #     echo "]" ;
    #     echo;
    # done

    # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)

    # Functions implemented in the interpreter core.
    builtin_kw = (
        "addlistener", "addpath", "addproperty", "all",
        "and", "any", "argnames", "argv", "assignin",
        "atexit", "autoload",
        "available_graphics_toolkits", "beep_on_error",
        "bitand", "bitmax", "bitor", "bitshift", "bitxor",
        "cat", "cell", "cellstr", "char", "class", "clc",
        "columns", "command_line_path",
        "completion_append_char", "completion_matches",
        "complex", "confirm_recursive_rmdir", "cputime",
        "crash_dumps_octave_core", "ctranspose", "cumprod",
        "cumsum", "debug_on_error", "debug_on_interrupt",
        "debug_on_warning", "default_save_options",
        "dellistener", "diag", "diff", "disp",
        "doc_cache_file", "do_string_escapes", "double",
        "drawnow", "e", "echo_executing_commands", "eps",
        "eq", "errno", "errno_list", "error", "eval",
        "evalin", "exec", "exist", "exit", "eye", "false",
        "fclear", "fclose", "fcntl", "fdisp", "feof",
        "ferror", "feval", "fflush", "fgetl", "fgets",
        "fieldnames", "file_in_loadpath", "file_in_path",
        "filemarker", "filesep", "find_dir_in_path",
        "fixed_point_format", "fnmatch", "fopen", "fork",
        "formula", "fprintf", "fputs", "fread", "freport",
        "frewind", "fscanf", "fseek", "fskipl", "ftell",
        "functions", "fwrite", "ge", "genpath", "get",
        "getegid", "getenv", "geteuid", "getgid",
        "getpgrp", "getpid", "getppid", "getuid", "glob",
        "gt", "gui_mode", "history_control",
        "history_file", "history_size",
        "history_timestamp_format_string", "home",
        "horzcat", "hypot", "ifelse",
        "ignore_function_time_stamp", "inferiorto",
        "info_file", "info_program", "inline", "input",
        "intmax", "intmin", "ipermute",
        "is_absolute_filename", "isargout", "isbool",
        "iscell", "iscellstr", "ischar", "iscomplex",
        "isempty", "isfield", "isfloat", "isglobal",
        "ishandle", "isieee", "isindex", "isinteger",
        "islogical", "ismatrix", "ismethod", "isnull",
        "isnumeric", "isobject", "isreal",
        "is_rooted_relative_filename", "issorted",
        "isstruct", "isvarname", "kbhit", "keyboard",
        "kill", "lasterr", "lasterror", "lastwarn",
        "ldivide", "le", "length", "link", "linspace",
        "logical", "lstat", "lt", "make_absolute_filename",
        "makeinfo_program", "max_recursion_depth", "merge",
        "methods", "mfilename", "minus", "mislocked",
        "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
        "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
        "munlock", "nargin", "nargout",
        "native_float_format", "ndims", "ne", "nfields",
        "nnz", "norm", "not", "numel", "nzmax",
        "octave_config_info", "octave_core_file_limit",
        "octave_core_file_name",
        "octave_core_file_options", "ones", "or",
        "output_max_field_width", "output_precision",
        "page_output_immediately", "page_screen_output",
        "path", "pathsep", "pause", "pclose", "permute",
        "pi", "pipe", "plus", "popen", "power",
        "print_empty_dimensions", "printf",
        "print_struct_array_contents", "prod",
        "program_invocation_name", "program_name",
        "putenv", "puts", "pwd", "quit", "rats", "rdivide",
        "readdir", "readlink", "read_readline_init_file",
        "realmax", "realmin", "rehash", "rename",
        "repelems", "re_read_readline_init_file", "reset",
        "reshape", "resize", "restoredefaultpath",
        "rethrow", "rmdir", "rmfield", "rmpath", "rows",
        "save_header_format_string", "save_precision",
        "saving_history", "scanf", "set", "setenv",
        "shell_cmd", "sighup_dumps_octave_core",
        "sigterm_dumps_octave_core", "silent_functions",
        "single", "size", "size_equal", "sizemax",
        "sizeof", "sleep", "source", "sparse_auto_mutate",
        "split_long_rows", "sprintf", "squeeze", "sscanf",
        "stat", "stderr", "stdin", "stdout", "strcmp",
        "strcmpi", "string_fill_char", "strncmp",
        "strncmpi", "struct", "struct_levels_to_print",
        "strvcat", "subsasgn", "subsref", "sum", "sumsq",
        "superiorto", "suppress_verbose_help_message",
        "symlink", "system", "tic", "tilde_expand",
        "times", "tmpfile", "tmpnam", "toc", "toupper",
        "transpose", "true", "typeinfo", "umask", "uminus",
        "uname", "undo_string_escapes", "unlink", "uplus",
        "upper", "usage", "usleep", "vec", "vectorize",
        "vertcat", "waitpid", "warning", "warranty",
        "whos_line_format", "yes_or_no", "zeros",
        "inf", "Inf", "nan", "NaN")

    # Names valid in command syntax.
    command_kw = ("close", "load", "who", "whos")

    # Functions shipped as .m function files.
    function_kw = (
        "accumarray", "accumdim", "acosd", "acotd",
        "acscd", "addtodate", "allchild", "ancestor",
        "anova", "arch_fit", "arch_rnd", "arch_test",
        "area", "arma_rnd", "arrayfun", "ascii", "asctime",
        "asecd", "asind", "assert", "atand",
        "autoreg_matrix", "autumn", "axes", "axis", "bar",
        "barh", "bartlett", "bartlett_test", "beep",
        "betacdf", "betainv", "betapdf", "betarnd",
        "bicgstab", "bicubic", "binary", "binocdf",
        "binoinv", "binopdf", "binornd", "bitcmp",
        "bitget", "bitset", "blackman", "blanks",
        "blkdiag", "bone", "box", "brighten", "calendar",
        "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
        "cauchy_rnd", "caxis", "celldisp", "center", "cgs",
        "chisquare_test_homogeneity",
        "chisquare_test_independence", "circshift", "cla",
        "clabel", "clf", "clock", "cloglog", "closereq",
        "colon", "colorbar", "colormap", "colperm",
        "comet", "common_size", "commutation_matrix",
        "compan", "compare_versions", "compass",
        "computer", "cond", "condest", "contour",
        "contourc", "contourf", "contrast", "conv",
        "convhull", "cool", "copper", "copyfile", "cor",
        "corrcoef", "cor_test", "cosd", "cotd", "cov",
        "cplxpair", "cross", "cscd", "cstrcat", "csvread",
        "csvwrite", "ctime", "cumtrapz", "curl", "cut",
        "cylinder", "date", "datenum", "datestr",
        "datetick", "datevec", "dblquad", "deal",
        "deblank", "deconv", "delaunay", "delaunayn",
        "delete", "demo", "detrend", "diffpara", "diffuse",
        "dir", "discrete_cdf", "discrete_inv",
        "discrete_pdf", "discrete_rnd", "display",
        "divergence", "dlmwrite", "dos", "dsearch",
        "dsearchn", "duplication_matrix", "durbinlevinson",
        "ellipsoid", "empirical_cdf", "empirical_inv",
        "empirical_pdf", "empirical_rnd", "eomday",
        "errorbar", "etime", "etreeplot", "example",
        "expcdf", "expinv", "expm", "exppdf", "exprnd",
        "ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
        "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
        "factorial", "fail", "fcdf", "feather", "fftconv",
        "fftfilt", "fftshift", "figure", "fileattrib",
        "fileparts", "fill", "findall", "findobj",
        "findstr", "finv", "flag", "flipdim", "fliplr",
        "flipud", "fpdf", "fplot", "fractdiff", "freqz",
        "freqz_plot", "frnd", "fsolve",
        "f_test_regression", "ftp", "fullfile", "fzero",
        "gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
        "gcbf", "gcbo", "gcf", "genvarname", "geocdf",
        "geoinv", "geopdf", "geornd", "getfield", "ginput",
        "glpk", "gls", "gplot", "gradient",
        "graphics_toolkit", "gray", "grid", "griddata",
        "griddatan", "gtext", "gunzip", "gzip", "hadamard",
        "hamming", "hankel", "hanning", "hggroup",
        "hidden", "hilb", "hist", "histc", "hold", "hot",
        "hotelling_test", "housh", "hsv", "hurst",
        "hygecdf", "hygeinv", "hygepdf", "hygernd",
        "idivide", "ifftshift", "image", "imagesc",
        "imfinfo", "imread", "imshow", "imwrite", "index",
        "info", "inpolygon", "inputname", "interpft",
        "interpn", "intersect", "invhilb", "iqr", "isa",
        "isdefinite", "isdir", "is_duplicate_entry",
        "isequal", "isequalwithequalnans", "isfigure",
        "ishermitian", "ishghandle", "is_leap_year",
        "isletter", "ismac", "ismember", "ispc", "isprime",
        "isprop", "isscalar", "issquare", "isstrprop",
        "issymmetric", "isunix", "is_valid_file_id",
        "isvector", "jet", "kendall",
        "kolmogorov_smirnov_cdf",
        "kolmogorov_smirnov_test", "kruskal_wallis_test",
        "krylov", "kurtosis", "laplace_cdf", "laplace_inv",
        "laplace_pdf", "laplace_rnd", "legend", "legendre",
        "license", "line", "linkprop", "list_primes",
        "loadaudio", "loadobj", "logistic_cdf",
        "logistic_inv", "logistic_pdf", "logistic_rnd",
        "logit", "loglog", "loglogerr", "logm", "logncdf",
        "logninv", "lognpdf", "lognrnd", "logspace",
        "lookfor", "ls_command", "lsqnonneg", "magic",
        "mahalanobis", "manova", "matlabroot",
        "mcnemar_test", "mean", "meansq", "median", "menu",
        "mesh", "meshc", "meshgrid", "meshz", "mexext",
        "mget", "mkpp", "mode", "moment", "movefile",
        "mpoles", "mput", "namelengthmax", "nargchk",
        "nargoutchk", "nbincdf", "nbininv", "nbinpdf",
        "nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
        "nonzeros", "normcdf", "normest", "norminv",
        "normpdf", "normrnd", "now", "nthroot", "null",
        "ocean", "ols", "onenormest", "optimget",
        "optimset", "orderfields", "orient", "orth",
        "pack", "pareto", "parseparams", "pascal", "patch",
        "pathdef", "pcg", "pchip", "pcolor", "pcr",
        "peaks", "periodogram", "perl", "perms", "pie",
        "pink", "planerot", "playaudio", "plot",
        "plotmatrix", "plotyy", "poisscdf", "poissinv",
        "poisspdf", "poissrnd", "polar", "poly",
        "polyaffine", "polyarea", "polyderiv", "polyfit",
        "polygcd", "polyint", "polyout", "polyreduce",
        "polyval", "polyvalm", "postpad", "powerset",
        "ppder", "ppint", "ppjumps", "ppplot", "ppval",
        "pqpnonneg", "prepad", "primes", "print",
        "print_usage", "prism", "probit", "qp", "qqplot",
        "quadcc", "quadgk", "quadl", "quadv", "quiver",
        "qzhess", "rainbow", "randi", "range", "rank",
        "ranks", "rat", "reallog", "realpow", "realsqrt",
        "record", "rectangle_lw", "rectangle_sw",
        "rectint", "refresh", "refreshdata",
        "regexptranslate", "repmat", "residue", "ribbon",
        "rindex", "roots", "rose", "rosser", "rotdim",
        "rref", "run", "run_count", "rundemos", "run_test",
        "runtests", "saveas", "saveaudio", "saveobj",
        "savepath", "scatter", "secd", "semilogx",
        "semilogxerr", "semilogy", "semilogyerr",
        "setaudio", "setdiff", "setfield", "setxor",
        "shading", "shift", "shiftdim", "sign_test",
        "sinc", "sind", "sinetone", "sinewave", "skewness",
        "slice", "sombrero", "sortrows", "spaugment",
        "spconvert", "spdiags", "spearman", "spectral_adf",
        "spectral_xdf", "specular", "speed", "spencer",
        "speye", "spfun", "sphere", "spinmap", "spline",
        "spones", "sprand", "sprandn", "sprandsym",
        "spring", "spstats", "spy", "sqp", "stairs",
        "statistics", "std", "stdnormal_cdf",
        "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
        "stem", "stft", "strcat", "strchr", "strjust",
        "strmatch", "strread", "strsplit", "strtok",
        "strtrim", "strtrunc", "structfun", "studentize",
        "subplot", "subsindex", "subspace", "substr",
        "substruct", "summer", "surf", "surface", "surfc",
        "surfl", "surfnorm", "svds", "swapbytes",
        "sylvester_matrix", "symvar", "synthesis", "table",
        "tand", "tar", "tcdf", "tempdir", "tempname",
        "test", "text", "textread", "textscan", "tinv",
        "title", "toeplitz", "tpdf", "trace", "trapz",
        "treelayout", "treeplot", "triangle_lw",
        "triangle_sw", "tril", "trimesh", "triplequad",
        "triplot", "trisurf", "triu", "trnd", "tsearchn",
        "t_test", "t_test_regression", "type", "unidcdf",
        "unidinv", "unidpdf", "unidrnd", "unifcdf",
        "unifinv", "unifpdf", "unifrnd", "union", "unique",
        "unix", "unmkpp", "unpack", "untabify", "untar",
        "unwrap", "unzip", "u_test", "validatestring",
        "vander", "var", "var_test", "vech", "ver",
        "version", "view", "voronoi", "voronoin",
        "waitforbuttonpress", "wavread", "wavwrite",
        "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
        "welch_test", "what", "white", "whitebg",
        "wienrnd", "wilcoxon_test", "wilkinson", "winter",
        "xlabel", "xlim", "ylabel", "yulewalker", "zip",
        "zlabel", "z_test")

    # Functions implemented as dynamically loaded (compiled) modules.
    loadable_kw = (
        "airy", "amd", "balance", "besselh", "besseli",
        "besselj", "besselk", "bessely", "bitpack",
        "bsxfun", "builtin", "ccolamd", "cellfun",
        "cellslices", "chol", "choldelete", "cholinsert",
        "cholinv", "cholshift", "cholupdate", "colamd",
        "colloc", "convhulln", "convn", "csymamd",
        "cummax", "cummin", "daspk", "daspk_options",
        "dasrt", "dasrt_options", "dassl", "dassl_options",
        "dbclear", "dbdown", "dbstack", "dbstatus",
        "dbstop", "dbtype", "dbup", "dbwhere", "det",
        "dlmread", "dmperm", "dot", "eig", "eigs",
        "endgrent", "endpwent", "etree", "fft", "fftn",
        "fftw", "filter", "find", "full", "gcd",
        "getgrent", "getgrgid", "getgrnam", "getpwent",
        "getpwnam", "getpwuid", "getrusage", "givens",
        "gmtime", "gnuplot_binary", "hess", "ifft",
        "ifftn", "inv", "isdebugmode", "issparse", "kron",
        "localtime", "lookup", "lsode", "lsode_options",
        "lu", "luinc", "luupdate", "matrix_type", "max",
        "min", "mktime", "pinv", "qr", "qrdelete",
        "qrinsert", "qrshift", "qrupdate", "quad",
        "quad_options", "qz", "rand", "rande", "randg",
        "randn", "randp", "randperm", "rcond", "regexp",
        "regexpi", "regexprep", "schur", "setgrent",
        "setpwent", "sort", "spalloc", "sparse", "spparms",
        "sprank", "sqrtm", "strfind", "strftime",
        "strptime", "strrep", "svd", "svd_driver", "syl",
        "symamd", "symbfact", "symrcm", "time", "tsearch",
        "typecast", "urlread", "urlwrite")

    # Element-wise "mapping" functions.
    mapping_kw = (
        "abs", "acos", "acosh", "acot", "acoth", "acsc",
        "acsch", "angle", "arg", "asec", "asech", "asin",
        "asinh", "atan", "atanh", "beta", "betainc",
        "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
        "cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
        "erfcx", "erfinv", "exp", "finite", "fix", "floor",
        "fmod", "gamma", "gammainc", "gammaln", "imag",
        "isalnum", "isalpha", "isascii", "iscntrl",
        "isdigit", "isfinite", "isgraph", "isinf",
        "islower", "isna", "isnan", "isprint", "ispunct",
        "isspace", "isupper", "isxdigit", "lcm", "lgamma",
        "log", "lower", "mod", "real", "rem", "round",
        "roundb", "sec", "sech", "sign", "sin", "sinh",
        "sqrt", "tan", "tanh", "toascii", "tolower", "xor")

    # Predefined constants / environment-like variables.
    builtin_consts = (
        "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
        "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
        "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
        "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
        "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
        "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
        "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
        "WSTOPSIG", "WTERMSIG", "WUNTRACED")

    tokens = {
        'root': [
            # Octave supports both %{...%} and #{...#} block comments.
            (r'%\{\s*\n', Comment.Multiline, 'percentblockcomment'),
            (r'#\{\s*\n', Comment.Multiline, 'hashblockcomment'),
            (r'[%#].*$', Comment),
            (r'^\s*function\b', Keyword, 'deffunc'),

            # from 'iskeyword' on hg changeset 8cc154f45e37
            (words((
                '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef',
                'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch',
                'end_unwind_protect', 'endclassdef', 'endevents', 'endfor',
                'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch',
                'endwhile', 'events', 'for', 'function', 'get', 'global', 'if',
                'methods', 'otherwise', 'persistent', 'properties', 'return',
                'set', 'static', 'switch', 'try', 'until', 'unwind_protect',
                'unwind_protect_cleanup', 'while'), suffix=r'\b'),
             Keyword),

            (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw,
                   suffix=r'\b'), Name.Builtin),

            (words(builtin_consts, suffix=r'\b'), Name.Constant),

            # operators in Octave but not Matlab:
            (r'-=|!=|!|/=|--', Operator),
            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators in Octave but not Matlab requiring escape for re:
            (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),


            # punctuation:
            (r'[\[\](){}:@.,]', Punctuation),
            (r'=|:|;', Punctuation),

            (r'"[^"]*"', String),

            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w)\].])\'+', Operator),
            (r'(?<![\w)\].])\'', String, 'string'),

            (r'[a-zA-Z_]\w*', Name),
            (r'\s+', Text),
            (r'.', Text),
        ],
        'percentblockcomment': [
            (r'^\s*%\}', Comment.Multiline, '#pop'),
            (r'^.*\n', Comment.Multiline),
            (r'.', Comment.Multiline),
        ],
        'hashblockcomment': [
            (r'^\s*#\}', Comment.Multiline, '#pop'),
            (r'^.*\n', Comment.Multiline),
            (r'.', Comment.Multiline),
        ],
        'string': [
            # NOTE(review): an unterminated quote never pops this state.
            (r"[^']*'", String, '#pop'),
        ],
        'deffunc': [
            (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Whitespace, Text, Whitespace, Punctuation,
                      Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Whitespace), '#pop'),
            # function with no args
            (r'(\s*)([a-zA-Z_]\w*)',
             bygroups(Whitespace, Name.Function), '#pop'),
        ],
    }

    def analyse_text(text):
        """Octave is quite hard to spot, and it looks like Matlab as well."""
        # Always 0 so the Matlab lexer wins ties for *.m files.
        return 0
+
+
class ScilabLexer(RegexLexer):
    """
    For Scilab source code.

    .. versionadded:: 1.5
    """
    name = 'Scilab'
    url = 'https://www.scilab.org/'
    aliases = ['scilab']
    filenames = ['*.sci', '*.sce', '*.tst']
    mimetypes = ['text/scilab']

    tokens = {
        'root': [
            # Scilab uses // line comments.
            (r'//.*?$', Comment.Single),
            (r'^\s*function\b', Keyword, 'deffunc'),

            (words((
                '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',
                'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',
                'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',
                'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',
                'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',
                'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'),
             Keyword),

            # Builtin names come from the generated _scilab_builtins module.
            (words(_scilab_builtins.functions_kw +
                   _scilab_builtins.commands_kw +
                   _scilab_builtins.macros_kw, suffix=r'\b'), Name.Builtin),

            (words(_scilab_builtins.variables_kw, suffix=r'\b'), Name.Constant),

            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'[\[\](){}@.,=:;]+', Punctuation),

            (r'"[^"]*"', String),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w)\].])\'+', Operator),
            (r'(?<![\w)\].])\'', String, 'string'),

            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer),

            (r'[a-zA-Z_]\w*', Name),
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
            # unterminated string at end of input: pop rather than loop
            (r'.', String, '#pop'),
        ],
        'deffunc': [
            (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Whitespace, Text, Whitespace, Punctuation,
                      Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Whitespace), '#pop'),
            # function with no args
            (r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
        ],
    }

    # the following is needed to distinguish Scilab and GAP .tst files
    def analyse_text(text):
        score = 0.0

        # Scilab comments (don't appear in e.g. GAP code)
        if re.search(r"^\s*//", text):
            score += 0.1
        if re.search(r"^\s*/\*", text):
            score += 0.1

        return min(score, 1.0)
diff --git a/pygments/lexers/maxima.py b/pygments/lexers/maxima.py
new file mode 100644
index 0000000..214e24e
--- /dev/null
+++ b/pygments/lexers/maxima.py
@@ -0,0 +1,85 @@
+"""
+ pygments.lexers.maxima
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the computer algebra system Maxima.
+
+ Derived from pygments/lexers/algebra.py.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['MaximaLexer']
+
class MaximaLexer(RegexLexer):
    """
    A Maxima lexer.
    Derived from pygments.lexers.MuPADLexer.

    .. versionadded:: 2.11
    """
    name = 'Maxima'
    url = 'http://maxima.sourceforge.net'
    aliases = ['maxima', 'macsyma']
    filenames = ['*.mac', '*.max']

    # Control-flow keywords.
    keywords = ('if', 'then', 'else', 'elseif',
                'do', 'while', 'repeat', 'until',
                'for', 'from', 'to', 'downto', 'step', 'thru')

    # Built-in mathematical and logical constants.
    constants = ('%pi', '%e', '%phi', '%gamma', '%i',
                 'und', 'ind', 'infinity', 'inf', 'minf',
                 'true', 'false', 'unknown', 'done')

    # Single-character operators; words() escapes them for the regex.
    operators = (r'.', r':', r'=', r'#',
                 r'+', r'-', r'*', r'/', r'^',
                 r'@', r'>', r'<', r'|', r'!', r"'")

    operator_words = ('and', 'or', 'not')

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            # '$' (silent) and ';' (printing) statement terminators.
            (r'[,;$]', Punctuation),
            # NOTE(review): no \b suffix, so e.g. 'inf' also matches as a
            # prefix of a longer identifier — TODO confirm intended.
            (words(constants), Name.Constant),
            (words(keywords), Keyword),
            (words(operators), Operator),
            (words(operator_words), Operator.Word),
            # identifier immediately followed by '(' -> function call/definition
            (r'''(?x)
              ((?:[a-zA-Z_#][\w#]*|`[^`]*`)
              (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
             bygroups(Name.Function, Text.Whitespace, Punctuation)),
            (r'''(?x)
              (?:[a-zA-Z_#%][\w#%]*|`[^`]*`)
              (?:::[a-zA-Z_#%][\w#%]*|`[^`]*`)*''', Name.Variable),
            # floats may carry a b/d/e/f/l/s exponent marker
            (r'[-+]?(\d*\.\d+([bdefls][-+]?\d+)?|\d+(\.\d*)?[bdefls][-+]?\d+)', Number.Float),
            (r'[-+]?\d+', Number.Integer),
            (r'\s+', Text.Whitespace),
            (r'.', Text)
        ],
        'comment': [
            # comments nest: /* ... /* ... */ ... */
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ]
    }

    def analyse_text(text):
        strength = 0.0
        # Input expression terminator.
        if re.search(r'\$\s*$', text, re.MULTILINE):
            strength += 0.05
        # Function definition operator.
        if ':=' in text:
            strength += 0.02
        return strength
diff --git a/pygments/lexers/meson.py b/pygments/lexers/meson.py
new file mode 100644
index 0000000..e8d1f08
--- /dev/null
+++ b/pygments/lexers/meson.py
@@ -0,0 +1,140 @@
+"""
+ pygments.lexers.meson
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Pygments lexer for the Meson build system
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include
+from pygments.token import Comment, Name, Number, Punctuation, Operator, \
+ Keyword, String, Whitespace
+
+__all__ = ['MesonLexer']
+
+
class MesonLexer(RegexLexer):
    """Meson language lexer.

    The grammar definition use to transcribe the syntax was retrieved from
    https://mesonbuild.com/Syntax.html#grammar for version 0.58.
    Some of those definitions are improperly transcribed, so the Meson++
    implementation was also checked: https://github.com/dcbaker/meson-plus-plus.

    .. versionadded:: 2.10
    """

    # TODO String interpolation @VARNAME@ inner matches
    # TODO keyword_arg: value inner matches

    name = 'Meson'
    url = 'https://mesonbuild.com/'
    aliases = ['meson', 'meson.build']
    filenames = ['meson.build', 'meson_options.txt']
    mimetypes = ['text/x-meson']

    tokens = {
        'root': [
            (r'#.*?$', Comment),
            (r"'''.*'''", String.Single),
            (r'[1-9][0-9]*', Number.Integer),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            include('string'),
            include('keywords'),
            include('expr'),
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
            (r'\s+', Whitespace),
        ],
        'string': [
            # multiline strings; single-quoted strings with \' escapes
            (r"[']{3}([']{0,2}([^\\']|\\(.|\n)))*[']{3}", String),
            (r"'.*?(?<!\\)(\\\\)*?'", String),
        ],
        'keywords': [
            (words((
                'if',
                'elif',
                'else',
                'endif',
                'foreach',
                'endforeach',
                'break',
                'continue',
            ),
                suffix=r'\b'), Keyword),
        ],
        'expr': [
            (r'(in|and|or|not)\b', Operator.Word),
            # Compound assignment/comparison operators first, then the
            # single-character arithmetic operators.
            # (Bug fix: the original pattern contained the typo '\+]=',
            # which matched the nonsense token '+]=' and never lexed '+=';
            # it also omitted the bare '*', '/', '%', and comparison
            # operators, which fell through to the error token.)
            (r'(\*=|/=|%=|\+=|-=|==|!=|<=|>=|<|>|\+|-|\*|/|%|=)', Operator),
            (r'[\[\]{}:().,?]', Punctuation),
            (words(('true', 'false'), suffix=r'\b'), Keyword.Constant),
            include('builtins'),
            (words((
                'meson',
                'build_machine',
                'host_machine',
                'target_machine',
            ),
                suffix=r'\b'), Name.Variable.Magic),
        ],
        'builtins': [
            # This list was extracted from the v0.58 reference manual
            (words((
                'add_global_arguments',
                'add_global_link_arguments',
                'add_languages',
                'add_project_arguments',
                'add_project_link_arguments',
                'add_test_setup',
                'assert',
                'benchmark',
                'both_libraries',
                'build_target',
                'configuration_data',
                'configure_file',
                'custom_target',
                'declare_dependency',
                'dependency',
                'disabler',
                'environment',
                'error',
                'executable',
                'files',
                'find_library',
                'find_program',
                'generator',
                'get_option',
                'get_variable',
                'include_directories',
                'install_data',
                'install_headers',
                'install_man',
                'install_subdir',
                'is_disabler',
                'is_variable',
                'jar',
                'join_paths',
                'library',
                'message',
                'project',
                'range',
                'run_command',
                'set_variable',
                'shared_library',
                'shared_module',
                'static_library',
                'subdir',
                'subdir_done',
                'subproject',
                'summary',
                'test',
                'vcs_tag',
                'warning',
            ),
                prefix=r'(?<!\.)',
                suffix=r'\b'), Name.Builtin),
            # method-style 'import' (module loading) — not after a dot
            (r'(?<!\.)import\b', Name.Namespace),
        ],
    }
diff --git a/pygments/lexers/mime.py b/pygments/lexers/mime.py
new file mode 100644
index 0000000..0a2db09
--- /dev/null
+++ b/pygments/lexers/mime.py
@@ -0,0 +1,210 @@
+"""
+ pygments.lexers.mime
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Multipurpose Internet Mail Extensions (MIME) data.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.lexers import get_lexer_for_mimetype
+from pygments.token import Text, Name, String, Operator, Comment, Other
+from pygments.util import get_int_opt, ClassNotFound
+
+__all__ = ["MIMELexer"]
+
+
class MIMELexer(RegexLexer):
    """
    Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer is
    designed to process nested multipart data.

    It assumes that the given data contains both header and body (and is
    split at an empty line). If no valid header is found, then the entire data
    will be treated as body.

    Additional options accepted:

    `MIME-max-level`
        Max recursion level for nested MIME structure. Any negative number
        would treated as unlimited. (default: -1)

    `Content-Type`
        Treat the data as a specific content type. Useful when header is
        missing, or this lexer would try to parse from header. (default:
        `text/plain`)

    `Multipart-Boundary`
        Set the default multipart boundary delimiter. This option is only used
        when `Content-Type` is `multipart` and header is missing. This lexer
        would try to parse from header by default. (default: None)

    `Content-Transfer-Encoding`
        Treat the data as a specific encoding. Or this lexer would try to parse
        from header by default. (default: None)

    .. versionadded:: 2.5
    """

    name = "MIME"
    aliases = ["mime"]
    mimetypes = ["multipart/mixed",
                 "multipart/related",
                 "multipart/alternative"]

    def __init__(self, **options):
        super().__init__(**options)
        self.boundary = options.get("Multipart-Boundary")
        # Bug fix: the docstring documents the hyphenated option names
        # ("Content-Type", "Content-Transfer-Encoding"), but only the
        # underscore spellings were ever read, so the documented options
        # were silently ignored.  Accept both; hyphenated wins, underscore
        # spellings are kept for backward compatibility.
        self.content_transfer_encoding = options.get(
            "Content-Transfer-Encoding",
            options.get("Content_Transfer_Encoding"))
        self.content_type = options.get(
            "Content-Type", options.get("Content_Type", "text/plain"))
        self.max_nested_level = get_int_opt(options, "MIME-max-level", -1)

    def get_header_tokens(self, match):
        """Tokenize one header field; interesting headers get sub-lexed."""
        field = match.group(1)

        if field.lower() in self.attention_headers:
            yield match.start(1), Name.Tag, field + ":"
            yield match.start(2), Text.Whitespace, match.group(2)

            # Delegate the field body to the state named after the header
            # (e.g. 'content-type'), stacked on top of 'root'.
            pos = match.end(2)
            body = match.group(3)
            for i, t, v in self.get_tokens_unprocessed(body, ("root", field.lower())):
                yield pos + i, t, v

        else:
            # Uninteresting headers are emitted verbatim as comments.
            yield match.start(), Comment, match.group()

    def get_body_tokens(self, match):
        """Tokenize the message body, splitting multiparts on the boundary."""
        pos_body_start = match.start()
        entire_body = match.group()

        # skip first newline
        if entire_body[0] == '\n':
            yield pos_body_start, Text.Whitespace, '\n'
            pos_body_start = pos_body_start + 1
            entire_body = entire_body[1:]

        # if it is not a multipart
        if not self.content_type.startswith("multipart") or not self.boundary:
            for i, t, v in self.get_bodypart_tokens(entire_body):
                yield pos_body_start + i, t, v
            return

        # find boundary
        bdry_pattern = r"^--%s(--)?\n" % re.escape(self.boundary)
        bdry_matcher = re.compile(bdry_pattern, re.MULTILINE)

        # some data has prefix text before first boundary
        m = bdry_matcher.search(entire_body)
        if m:
            pos_part_start = pos_body_start + m.end()
            pos_iter_start = lpos_end = m.end()
            yield pos_body_start, Text, entire_body[:m.start()]
            yield pos_body_start + lpos_end, String.Delimiter, m.group()
        else:
            pos_part_start = pos_body_start
            pos_iter_start = 0

        # process tokens of each body part
        for m in bdry_matcher.finditer(entire_body, pos_iter_start):
            # bodypart
            lpos_start = pos_part_start - pos_body_start
            lpos_end = m.start()
            part = entire_body[lpos_start:lpos_end]
            for i, t, v in self.get_bodypart_tokens(part):
                yield pos_part_start + i, t, v

            # boundary
            yield pos_body_start + lpos_end, String.Delimiter, m.group()
            pos_part_start = pos_body_start + m.end()

        # some data has suffix text after last boundary
        lpos_start = pos_part_start - pos_body_start
        if lpos_start != len(entire_body):
            yield pos_part_start, Text, entire_body[lpos_start:]

    def get_bodypart_tokens(self, text):
        """Lex one body part with the lexer for its content type, if any."""
        # return if:
        #  * no content
        #  * no content type specific
        #  * content encoding is not readable
        #  * max recurrsion exceed
        if not text.strip() or not self.content_type:
            return [(0, Other, text)]

        cte = self.content_transfer_encoding
        if cte and cte not in {"8bit", "7bit", "quoted-printable"}:
            return [(0, Other, text)]

        if self.max_nested_level == 0:
            return [(0, Other, text)]

        # get lexer
        try:
            lexer = get_lexer_for_mimetype(self.content_type)
        except ClassNotFound:
            return [(0, Other, text)]

        # nested MIME: decrement the recursion budget
        if isinstance(lexer, type(self)):
            lexer.max_nested_level = self.max_nested_level - 1

        return lexer.get_tokens_unprocessed(text)

    def store_content_type(self, match):
        """Record the Content-Type value and tokenize type/subtype."""
        self.content_type = match.group(1)

        prefix_len = match.start(1) - match.start(0)
        yield match.start(0), Text.Whitespace, match.group(0)[:prefix_len]
        yield match.start(1), Name.Label, match.group(2)
        yield match.end(2), String.Delimiter, '/'
        yield match.start(3), Name.Label, match.group(3)

    def get_content_type_subtokens(self, match):
        """Tokenize a ';name=value' parameter; capture a boundary if present."""
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Text.Whitespace, match.group(2)
        yield match.start(3), Name.Attribute, match.group(3)
        yield match.start(4), Operator, match.group(4)
        yield match.start(5), String, match.group(5)

        if match.group(3).lower() == "boundary":
            boundary = match.group(5).strip()
            # strip surrounding double quotes, if any
            if boundary[0] == '"' and boundary[-1] == '"':
                boundary = boundary[1:-1]
            self.boundary = boundary

    def store_content_transfer_encoding(self, match):
        """Record the Content-Transfer-Encoding value (lower-cased)."""
        self.content_transfer_encoding = match.group(0).lower()
        yield match.start(0), Name.Constant, match.group(0)

    # Headers whose values this lexer inspects; others become comments.
    attention_headers = {"content-type", "content-transfer-encoding"}

    tokens = {
        "root": [
            # header field (value may fold across lines)
            (r"^([\w-]+):( *)([\s\S]*?\n)(?![ \t])", get_header_tokens),
            # blank line then everything else is the body
            (r"^$[\s\S]+", get_body_tokens),
        ],
        "header": [
            # folding
            (r"\n[ \t]", Text.Whitespace),
            (r"\n(?![ \t])", Text.Whitespace, "#pop"),
        ],
        "content-type": [
            include("header"),
            (
                r"^\s*((multipart|application|audio|font|image|model|text|video"
                r"|message)/([\w-]+))",
                store_content_type,
            ),
            (r'(;)((?:[ \t]|\n[ \t])*)([\w:-]+)(=)([\s\S]*?)(?=;|\n(?![ \t]))',
             get_content_type_subtokens),
            (r';[ \t]*\n(?![ \t])', Text, '#pop'),
        ],
        "content-transfer-encoding": [
            include("header"),
            (r"([\w-]+)", store_content_transfer_encoding),
        ],
    }
diff --git a/pygments/lexers/minecraft.py b/pygments/lexers/minecraft.py
new file mode 100644
index 0000000..0506fdb
--- /dev/null
+++ b/pygments/lexers/minecraft.py
@@ -0,0 +1,394 @@
+"""
+ pygments.lexers.minecraft
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Minecraft related languages.
+
+ SNBT. A data communication format used in Minecraft.
+ wiki: https://minecraft.fandom.com/wiki/NBT_format
+
+ MCFunction. The Function file for Minecraft Data packs and Add-ons.
+ official: https://learn.microsoft.com/en-us/minecraft/creator/documents/functionsintroduction
+ wiki: https://minecraft.fandom.com/wiki/Function
+
+ MCSchema. A kind of data Schema for Minecraft Add-on Development.
+ official: https://learn.microsoft.com/en-us/minecraft/creator/reference/content/schemasreference/
+ community example: https://www.mcbe-dev.net/addons/data-driven/manifest.html
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, default, include, bygroups
+from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ['SNBTLexer', 'MCFunctionLexer', 'MCSchemaLexer']
+
+
class SNBTLexer(RegexLexer):
    """Lexer for SNBT (stringified NBT), Minecraft's data exchange format.

    .. versionadded:: 2.12.0
    """

    name = 'SNBT'
    url = 'https://minecraft.fandom.com/wiki/NBT_format'
    aliases = ['snbt']
    filenames = ['*.snbt']
    mimetypes = ['text/snbt']

    tokens = {
        'root': [
            # Only a curly bracket can open a document here: square
            # brackets are meaningful solely in NBT pathing, which is an
            # mcfunction concept.
            (r'\{', Punctuation, 'compound'),
            (r'[^\{]+', Text),
        ],

        'whitespace': [
            (r'\s+', Whitespace),
        ],

        'operators': [
            (r'[,:;]', Punctuation),
        ],

        'literals': [
            (r'(true|false)', Keyword.Constant),
            (r'-?\d+[eE]-?\d+', Number.Float),
            (r'-?\d*\.\d+[fFdD]?', Number.Float),
            (r'-?\d+[bBsSlLfFdD]?', Number.Integer),

            # Each quote style gets its own state so the two kinds of
            # string cannot interfere with one another.
            (r'"', String.Double, 'dq-string'),
            (r"'", String.Single, 'sq-string'),
        ],
        'dq-string': [
            (r'\\.', String.Escape),
            (r'[^\\"\n]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'sq-string': [
            (r'\\.', String.Escape),
            (r"[^\\'\n]+", String.Single),
            (r"'", String.Single, '#pop'),
        ],

        'compound': [
            # Unquoted compound keys; quoted keys are still handled by
            # the string rules pulled in through 'literals'.
            (r'[A-Z_a-z]+', Name.Attribute),
            include('operators'),
            include('whitespace'),
            include('literals'),
            (r'\{', Punctuation, '#push'),
            (r'\[', Punctuation, 'list'),
            (r'\}', Punctuation, '#pop'),
        ],

        'list': [
            (r'[A-Z_a-z]+', Name.Attribute),
            include('literals'),
            include('operators'),
            include('whitespace'),
            (r'\[', Punctuation, '#push'),
            (r'\{', Punctuation, 'compound'),
            (r'\]', Punctuation, '#pop'),
        ],
    }
+
+
class MCFunctionLexer(RegexLexer):
    """Lexer for the mcfunction scripting language used in Minecraft
    Modelled somewhat after the `GitHub mcfunction grammar <https://github.com/Arcensoth/language-mcfunction>`_.

    .. versionadded:: 2.12.0
    """

    name = "MCFunction"
    url = "https://minecraft.fandom.com/wiki/Commands"
    aliases = ["mcfunction", "mcf"]
    filenames = ["*.mcfunction"]
    mimetypes = ["text/mcfunction"]

    # Used to denote the start of a block comment, borrowed from Github's mcfunction
    _block_comment_prefix = "[>!]"

    tokens = {
        "root": [
            include("names"),
            include("comments"),
            include("literals"),
            include("whitespace"),
            include("property"),
            include("operators"),
            include("selectors"),
        ],

        "names": [
            # The start of a command (either beginning of line OR after the run keyword)
            # We don't encode a list of keywords since mods, plugins, or even pre-processors
            # may add new commands, so we have a 'close-enough' regex which catches them.
            (r"^(\s*)([a-z_]+)", bygroups(Whitespace, Name.Builtin)),
            (r"(?<=run)\s+[a-z_]+", Name.Builtin),

            # UUID
            (r"\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b", Name.Variable),
            include("resource-name"),
            # normal command names and scoreboards
            # there's no way to know the difference unfortunately
            (r"[A-Za-z_][\w.#%$]+", Keyword.Constant),
            (r"[#%$][\w.#%$]+", Name.Variable.Magic),
        ],

        "resource-name": [
            # resource names have to be lowercase
            (r"#?[a-z_][a-z_.-]*:[a-z0-9_./-]+", Name.Function),
            # similar to above except optional `:``
            # a `/` must be present "somewhere"
            (r"#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+", Name.Function),
        ],

        "whitespace": [
            (r"\s+", Whitespace),
        ],

        "comments": [
            (rf"^\s*(#{_block_comment_prefix})", Comment.Multiline,
             ("comments.block", "comments.block.emphasized")),
            (r"#.*$", Comment.Single),
        ],
        "comments.block": [
            (rf"^\s*#{_block_comment_prefix}", Comment.Multiline,
             "comments.block.emphasized"),
            (r"^\s*#", Comment.Multiline, "comments.block.normal"),
            default("#pop"),
        ],
        "comments.block.normal": [
            include("comments.block.special"),
            (r"\S+", Comment.Multiline),
            (r"\n", Text, "#pop"),
            include("whitespace"),
        ],
        "comments.block.emphasized": [
            include("comments.block.special"),
            (r"\S+", String.Doc),
            (r"\n", Text, "#pop"),
            include("whitespace"),
        ],
        "comments.block.special": [
            # Params
            (r"@\S+", Name.Decorator),

            include("resource-name"),

            # Scoreboard player names
            (r"[#%$][\w.#%$]+", Name.Variable.Magic),
        ],

        "operators": [
            (r"[\-~%^?!+*<>\\/|&=.]", Operator),
        ],

        "literals": [
            (r"\.\.", Literal),
            (r"(true|false)", Keyword.Pseudo),

            # these are like unquoted strings and appear in many places
            (r"[A-Za-z_]+", Name.Variable.Class),

            (r"[0-7]b", Number.Byte),
            # NOTE(review): "[eE]?" makes the exponent marker itself
            # optional, so e.g. "1+5" also matches — kept as-is to
            # preserve existing behavior.
            (r"[+-]?\d*\.?\d+([eE]?[+-]?\d+)?[df]?\b", Number.Float),
            (r"[+-]?\d+\b", Number.Integer),
            (r'"', String.Double, "literals.string-double"),
            (r"'", String.Single, "literals.string-single"),
        ],
        "literals.string-double": [
            (r"\\.", String.Escape),
            (r'[^\\"\n]+', String.Double),
            (r'"', String.Double, "#pop"),
        ],
        "literals.string-single": [
            (r"\\.", String.Escape),
            (r"[^\\'\n]+", String.Single),
            (r"'", String.Single, "#pop"),
        ],

        "selectors": [
            (r"@[a-z]", Name.Variable),
        ],


        ## Generic Property Container
        # There are several, differing instances where the language accepts
        # specific contained keys or contained key, value pairings.
        #
        # Property Maps:
        # - Starts with either `[` or `{`
        # - Key separated by `:` or `=`
        # - Deliminated by `,`
        #
        # Property Lists:
        # - Starts with `[`
        # - Deliminated by `,`
        #
        # For simplicity, these patterns match a generic, nestable structure
        # which follow a key, value pattern. For normal lists, there's only keys.
        # This allow some "illegal" structures, but we'll accept those for
        # sake of simplicity
        #
        # Examples:
        # - `[facing=up, powered=true]` (blockstate)
        # - `[name="hello world", nbt={key: 1b}]` (selector + nbt)
        # - `[{"text": "value"}, "literal"]` (json)
        ##
        "property": [
            # This state gets included in root and also several substates
            # We do this to shortcut the starting of new properties
            # within other properties. Lists can have sublists and compounds
            # and values can start a new property (see the `difficult_1.txt`
            # snippet).
            (r"\{", Punctuation, ("property.curly", "property.key")),
            (r"\[", Punctuation, ("property.square", "property.key")),
        ],
        "property.curly": [
            include("whitespace"),
            include("property"),
            (r"\}", Punctuation, "#pop"),
        ],
        "property.square": [
            include("whitespace"),
            include("property"),
            (r"\]", Punctuation, "#pop"),

            # lists can have sequences of items
            (r",", Punctuation),
        ],
        "property.key": [
            include("whitespace"),

            # resource names (for advancements)
            # can omit `:` to default `minecraft:`
            # must check if there is a future equals sign if `:` is in the name
            (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+(?=\s*\=)", Name.Attribute, "property.delimiter"),
            (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Attribute, "property.delimiter"),

            # unquoted NBT key
            (r"[A-Za-z_\-\+]+", Name.Attribute, "property.delimiter"),

            # quoted JSON or NBT key
            # RegexLexer rules are (regex, token) or (regex, token, state)
            # tuples; the new state may itself be a tuple of states pushed
            # together.  The original 4-element tuples here were invalid
            # and also pointed at the string states of 'literals' instead
            # of the dedicated key-string states below.
            (r'"', Name.Attribute, ("property.delimiter", "property.key.string-double")),
            (r"'", Name.Attribute, ("property.delimiter", "property.key.string-single")),

            # index for a list
            (r"-?\d+", Number.Integer, "property.delimiter"),

            default("#pop"),
        ],
        "property.key.string-double": [
            (r"\\.", String.Escape),
            (r'[^\\"\n]+', Name.Attribute),
            (r'"', Name.Attribute, "#pop"),
        ],
        "property.key.string-single": [
            (r"\\.", String.Escape),
            (r"[^\\'\n]+", Name.Attribute),
            (r"'", Name.Attribute, "#pop"),
        ],
        "property.delimiter": [
            include("whitespace"),

            (r"[:=]!?", Punctuation, "property.value"),
            (r",", Punctuation),

            default("#pop"),
        ],
        "property.value": [
            include("whitespace"),

            # unquoted resource names are valid literals here
            (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+", Name.Tag),
            (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Tag),

            include("literals"),
            include("property"),

            default("#pop"),
        ],
    }
+
+
class MCSchemaLexer(RegexLexer):
    """Lexer for Minecraft Add-ons data Schemas, an interface structure standard used in Minecraft

    .. versionadded:: 2.14.0
    """

    name = 'MCSchema'
    url = 'https://learn.microsoft.com/en-us/minecraft/creator/reference/content/schemasreference/'
    aliases = ['mcschema']
    filenames = ['*.mcschema']
    mimetypes = ['text/mcschema']

    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Whitespace),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Whitespace, '#pop')
        ],
        'singlestring': [
            (r'\\.', String.Escape),
            (r"'", String.Single, '#pop'),
            (r"[^\\']+", String.Single),
        ],
        'doublestring': [
            (r'\\.', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'[^\\"]+', String.Double),
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),

            # keywords for optional word and field types
            (r'(?<=: )opt', Operator.Word),
            (r'(?<=\s)[\w-]*(?=(\s+"|\n))', Keyword.Declaration),

            # numeric literals; a float must contain a dot or an exponent,
            # and must be tried before the integer-family rules so "1.5"
            # is one Number.Float token rather than being split into
            # Integer "1" + Punctuation "." + Integer "5" (and so the
            # octal rule cannot eat the leading "0" of "0.5")
            (r'(\.\d+|\d+\.\d*)([eE][-+]?\d+)?|\d+[eE][-+]?\d+', Number.Float),
            (r'0[bB][01]+', Number.Bin),
            (r'0[oO]?[0-7]+', Number.Oct),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),

            # possible punctuations
            (r'\.\.\.|=>', Punctuation),
            (r'\+\+|--|~|\?\?=?|\?|:|\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|(?:\*\*|\|\||&&|[-<>+*%&|^/]))=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            # strings
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),

            # title line
            (r'[\w-]*?(?=:\{?\n)', String.Symbol),
            # title line with a version code, formatted
            # `major.minor.patch-prerelease+buildmeta`
            (r'([\w-]*?)(:)(\d+)(?:(\.)(\d+)(?:(\.)(\d+)(?:(\-)((?:[^\W_]|-)*(?:\.(?:[^\W_]|-)*)*))?(?:(\+)((?:[^\W_]|-)+(?:\.(?:[^\W_]|-)+)*))?)?)?(?=:\{?\n)', bygroups(String.Symbol, Operator, Number.Integer, Operator, Number.Integer, Operator, Number.Integer, Operator, String, Operator, String)),

            # fallback: consume the rest of the line
            (r'.*\n', Text),
        ]
    }
diff --git a/pygments/lexers/mips.py b/pygments/lexers/mips.py
new file mode 100644
index 0000000..e20038b
--- /dev/null
+++ b/pygments/lexers/mips.py
@@ -0,0 +1,128 @@
+"""
+ pygments.lexers.mips
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for MIPS assembly.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Whitespace, Comment, String, Keyword, Name, Text
+
+__all__ = ["MIPSLexer"]
+
+
class MIPSLexer(RegexLexer):
    """
    A MIPS Assembly Lexer.

    Based on the Emacs major mode by hlissner:
    https://github.com/hlissner/emacs-mips-mode
    """

    name = 'MIPS'
    aliases = ['mips']
    # TODO: add '*.s' and '*.asm', which will require designing an analyse_text
    # method for this lexer and refactoring those from Gas and Nasm in order to
    # have relatively reliable detection
    filenames = ['*.mips', '*.MIPS']

    keywords = [
        # Arithmetic instructions
        "add", "sub", "subu", "addi", "subi", "addu", "addiu",
        # Multiplication/division
        "mul", "mult", "multu", "mulu", "madd", "maddu", "msub", "msubu", "div", "divu",
        # Bitwise operations
        "and", "or", "nor", "xor", "andi", "ori", "xori", "clo", "clz",
        # Shifts
        "sll", "srl", "sllv", "srlv", "sra", "srav",
        # Comparisons
        "slt", "sltu", "slti", "sltiu",
        # Move data
        "mfhi", "mthi", "mflo", "mtlo", "movn", "movz", "movf", "movt",
        # Jump
        "j", "jal", "jalr", "jr",
        # branch
        "bc1f", "bc1t", "beq", "bgez", "bgezal", "bgtz", "blez", "bltzal", "bltz", "bne",
        # Load
        "lui", "lb", "lbu", "lh", "lhu", "lw", "lwcl", "lwl", "lwr",
        # Store
        "sb", "sh", "sw", "swl", "swr",  # coproc: swc1 sdc1
        # Concurrent load/store
        "ll", "sc",
        # Trap handling
        "teq", "teqi", "tne", "tneqi", "tge", "tgeu", "tgei", "tgeiu", "tlt", "tltu", "tlti",
        "tltiu",
        # Exception / Interrupt
        "eret", "break", "bop", "syscall",
        # --- Floats -----------------------------------------------------
        # Arithmetic
        "add.s", "add.d", "sub.s", "sub.d", "mul.s", "mul.d", "div.s", "div.d", "neg.d",
        "neg.s",
        # Comparison
        "c.e.d", "c.e.s", "c.le.d", "c.le.s", "c.lt.s", "c.lt.d",  # "c.gt.s", "c.gt.d",
        "madd.s", "madd.d", "msub.s", "msub.d",
        # Move Floats
        "mov.d", "move.s", "movf.d", "movf.s", "movt.d", "movt.s", "movn.d", "movn.s",
        "movnzd", "movz.s", "movz.d",
        # Conversion
        "cvt.d.s", "cvt.d.w", "cvt.s.d", "cvt.s.w", "cvt.w.d", "cvt.w.s", "trunc.w.d",
        "trunc.w.s",
        # Math
        "abs.s", "abs.d", "sqrt.s", "sqrt.d", "ceil.w.d", "ceil.w.s", "floor.w.d",
        "floor.w.s", "round.w.d", "round.w.s",
    ]

    pseudoinstructions = [
        # Arithmetic & logical
        "rem", "remu", "mulo", "mulou", "abs", "neg", "negu", "not", "rol", "ror",
        # branches
        "b", "beqz", "bge", "bgeu", "bgt", "bgtu", "ble", "bleu", "blt", "bltu", "bnez",
        # loads
        "la", "li", "ld", "ulh", "ulhu", "ulw",
        # Store
        "sd", "ush", "usw",
        # move
        "move",  # coproc: "mfc1.d",
        # comparisons
        "sgt", "sgtu", "sge", "sgeu", "sle", "sleu", "sne", "seq",
        # --- Floats -----------------------------------------------------
        # load-store
        "l.d", "l.s", "s.d", "s.s",
    ]

    directives = [
        ".align", ".ascii", ".asciiz", ".byte", ".data", ".double", ".extern", ".float",
        ".globl", ".half", ".kdata", ".ktext", ".space", ".text", ".word",
    ]

    deprecated = [
        "beql", "bnel", "bgtzl", "bgezl", "bltzl", "blezl", "bltzall", "bgezall",
    ]

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*', Comment),
            (r'"', String, 'string'),
            # Numeric immediates.  The original pattern "-?[0-9]+?" used a
            # non-greedy "+?" with nothing following it, so the regex
            # engine stopped after one digit and multi-digit numbers were
            # emitted one digit at a time; greedy "+" takes the whole run.
            (r'-?[0-9]+', Keyword.Constant),
            (r'\w*:', Name.Function),
            (words(deprecated, suffix=r'\b'), Keyword.Pseudo),  # need warning face
            (words(pseudoinstructions, suffix=r'\b'), Name.Variable),
            (words(keywords, suffix=r'\b'), Keyword),
            (r'[slm][ftwd]c[0-9]([.]d)?', Keyword),
            (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)',
             Keyword.Type),
            (words(directives, suffix=r'\b'), Name.Entity),  # Preprocessor?
            (r':|,|;|\{|\}|=>|@|\$|=', Name.Builtin),
            (r'\w+', Text),
            (r'.', Text),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String),
        ],
    }
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
new file mode 100644
index 0000000..bd558c3
--- /dev/null
+++ b/pygments/lexers/ml.py
@@ -0,0 +1,960 @@
+"""
+ pygments.lexers.ml
+ ~~~~~~~~~~~~~~~~~~
+
+ Lexers for ML family languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error
+
+__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer', 'ReasonLexer', 'FStarLexer']
+
+
class SMLLexer(RegexLexer):
    """
    For the Standard ML language.

    .. versionadded:: 1.5
    """

    name = 'Standard ML'
    aliases = ['sml']
    filenames = ['*.sml', '*.sig', '*.fun']
    mimetypes = ['text/x-standardml', 'application/x-standardml']

    # Reserved words that are spelled like alphanumeric identifiers.
    alphanumid_reserved = {
        # Core
        'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
        'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
        'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
        'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
        # Modules
        'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
        'struct', 'structure', 'where',
    }

    # Reserved words that are spelled like symbolic identifiers
    # (regex-escaped where needed).
    symbolicid_reserved = {
        # Core
        ':', r'\|', '=', '=>', '->', '#',
        # Modules
        ':>',
    }

    nonid_reserved = {'(', ')', '[', ']', '{', '}', ',', ';', '...', '_'}

    alphanumid_re = r"[a-zA-Z][\w']*"
    symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"

    # A character constant is a sequence of the form #s, where s is a string
    # constant denoting a string of size one character. This setup just parses
    # the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erroneous
    # multiple-character string.
    def stringy(whatkind):
        """Return the rules for the interior of a string/char literal."""
        return [
            (r'[^"\\]', whatkind),
            (r'\\[\\"abtnvfr]', String.Escape),
            # Control-character notation is used for codes < 32,
            # where \^@ == \000
            (r'\\\^[\x40-\x5e]', String.Escape),
            # Docs say 'decimal digits'
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\\s+\\', String.Interpol),
            (r'"', whatkind, '#pop'),
        ]

    # Callbacks for distinguishing tokens and reserved words
    def long_id_callback(self, match):
        """Lex one qualifying module component of a dotted identifier.

        A reserved word is not legal in that position and is flagged as
        an Error.
        """
        if match.group(1) in self.alphanumid_reserved:
            token = Error
        else:
            token = Name.Namespace
        yield match.start(1), token, match.group(1)
        yield match.start(2), Punctuation, match.group(2)

    def end_id_callback(self, match):
        """Lex the final component of a dotted identifier.

        Reserved words (alphanumeric or symbolic) may not appear here.
        """
        if match.group(1) in self.alphanumid_reserved:
            token = Error
        elif match.group(1) in self.symbolicid_reserved:
            token = Error
        else:
            token = Name
        yield match.start(1), token, match.group(1)

    def id_callback(self, match):
        """Classify a bare identifier as reserved word, punctuation or name."""
        str = match.group(1)
        if str in self.alphanumid_reserved:
            token = Keyword.Reserved
        elif str in self.symbolicid_reserved:
            token = Punctuation
        else:
            token = Name
        yield match.start(1), token, str

    tokens = {
        # Whitespace and comments are (almost) everywhere
        'whitespace': [
            (r'\s+', Text),
            (r'\(\*', Comment.Multiline, 'comment'),
        ],

        'delimiters': [
            # This lexer treats these delimiters specially:
            # Delimiters define scopes, and the scope is how the meaning of
            # the `|' is resolved - is it a case/handle expression, or function
            # definition by cases? (This is not how the Definition works, but
            # it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
            (r'\(|\[|\{', Punctuation, 'main'),
            (r'\)|\]|\}', Punctuation, '#pop'),
            (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
            (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
            (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
        ],

        'core': [
            # Punctuation that doesn't overlap symbolic identifiers
            (r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
             Punctuation),

            # Special constants: strings, floats, numbers in decimal and hex
            (r'#"', String.Char, 'char'),
            (r'"', String.Double, 'string'),
            (r'~?0x[0-9a-fA-F]+', Number.Hex),
            (r'0wx[0-9a-fA-F]+', Number.Hex),
            (r'0w\d+', Number.Integer),
            (r'~?\d+\.\d+[eE]~?\d+', Number.Float),
            (r'~?\d+\.\d+', Number.Float),
            (r'~?\d+[eE]~?\d+', Number.Float),
            (r'~?\d+', Number.Integer),

            # Labels
            (r'#\s*[1-9][0-9]*', Name.Label),
            (r'#\s*(%s)' % alphanumid_re, Name.Label),
            (r'#\s+(%s)' % symbolicid_re, Name.Label),
            # Some reserved words trigger a special, local lexer state change
            (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
            (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
            (r'\b(functor|include|open|signature|structure)\b(?!\')',
             Keyword.Reserved, 'sname'),
            (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),

            # Regular identifiers, long and otherwise
            (r'\'[\w\']*', Name.Decorator),
            (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
            (r'(%s)' % alphanumid_re, id_callback),
            (r'(%s)' % symbolicid_re, id_callback),
        ],
        'dotted': [
            (r'(%s)(\.)' % alphanumid_re, long_id_callback),
            (r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
            (r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
            (r'\s+', Error),
            (r'\S+', Error),
        ],


        # Main parser (prevents errors in files that have scoping errors)
        'root': [
            default('main')
        ],

        # In this scope, I expect '|' to not be followed by a function name,
        # and I expect 'and' to be followed by a binding site
        'main': [
            include('whitespace'),

            # Special behavior of val/and/fun
            (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
            (r'\b(fun)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main-fun', 'fname')),

            include('delimiters'),
            include('core'),
            (r'\S+', Error),
        ],

        # In this scope, I expect '|' and 'and' to be followed by a function
        'main-fun': [
            include('whitespace'),

            (r'\s', Text),
            (r'\(\*', Comment.Multiline, 'comment'),

            # Special behavior of val/and/fun
            (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
            (r'\b(val)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main', 'vname')),

            # Special behavior of '|' and '|'-manipulating keywords
            (r'\|', Punctuation, 'fname'),
            (r'\b(case|handle)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main')),

            include('delimiters'),
            include('core'),
            (r'\S+', Error),
        ],

        # Character and string parsers
        'char': stringy(String.Char),
        'string': stringy(String.Double),

        # Pop back out when the next token is a reserved word.
        'breakout': [
            (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
        ],

        # Dealing with what comes after module system keywords
        'sname': [
            include('whitespace'),
            include('breakout'),

            (r'(%s)' % alphanumid_re, Name.Namespace),
            default('#pop'),
        ],

        # Dealing with what comes after the 'fun' (or 'and' or '|') keyword
        'fname': [
            include('whitespace'),
            (r'\'[\w\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),

            (r'(%s)' % alphanumid_re, Name.Function, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Function, '#pop'),

            # Ignore interesting function declarations like "fun (x + y) = ..."
            default('#pop'),
        ],

        # Dealing with what comes after the 'val' (or 'and') keyword
        'vname': [
            include('whitespace'),
            (r'\'[\w\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),

            (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
             bygroups(Name.Variable, Text, Punctuation), '#pop'),
            (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
             bygroups(Name.Variable, Text, Punctuation), '#pop'),
            (r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Variable, '#pop'),

            # Ignore interesting patterns like 'val (x, y)'
            default('#pop'),
        ],

        # Dealing with what comes after the 'type' (or 'and') keyword
        'tname': [
            include('whitespace'),
            include('breakout'),

            (r'\'[\w\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),

            (r'(%s)' % alphanumid_re, Keyword.Type),
            (r'(%s)' % symbolicid_re, Keyword.Type),
            (r'\S+', Error, '#pop'),
        ],

        # A type binding includes most identifiers
        'typbind': [
            include('whitespace'),

            (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),

            include('breakout'),
            include('core'),
            (r'\S+', Error, '#pop'),
        ],

        # Dealing with what comes after the 'datatype' (or 'and') keyword
        'dname': [
            include('whitespace'),
            include('breakout'),

            (r'\'[\w\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'(=)(\s*)(datatype)',
             bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
            (r'=(?!%s)' % symbolicid_re, Punctuation,
             ('#pop', 'datbind', 'datcon')),

            (r'(%s)' % alphanumid_re, Keyword.Type),
            (r'(%s)' % symbolicid_re, Keyword.Type),
            (r'\S+', Error, '#pop'),
        ],

        # common case - A | B | C of int
        'datbind': [
            include('whitespace'),

            (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
            (r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
            (r'\b(of)\b(?!\')', Keyword.Reserved),

            (r'(\|)(\s*)(%s)' % alphanumid_re,
             bygroups(Punctuation, Text, Name.Class)),
            (r'(\|)(\s+)(%s)' % symbolicid_re,
             bygroups(Punctuation, Text, Name.Class)),

            include('breakout'),
            include('core'),
            (r'\S+', Error),
        ],

        # Dealing with what comes after an exception
        'ename': [
            include('whitespace'),

            (r'(and\b)(\s+)(%s)' % alphanumid_re,
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'(and\b)(\s*)(%s)' % symbolicid_re,
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(of)\b(?!\')', Keyword.Reserved),
            (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),

            default('#pop'),
        ],

        # The first constructor of a datatype binding.
        'datcon': [
            include('whitespace'),
            (r'(%s)' % alphanumid_re, Name.Class, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Class, '#pop'),
            (r'\S+', Error, '#pop'),
        ],

        # Series of type variables
        'tyvarseq': [
            (r'\s', Text),
            (r'\(\*', Comment.Multiline, 'comment'),

            (r'\'[\w\']*', Name.Decorator),
            (alphanumid_re, Name),
            (r',', Punctuation),
            (r'\)', Punctuation, '#pop'),
            (symbolicid_re, Name),
        ],

        # Nestable (* ... *) comments.
        'comment': [
            (r'[^(*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[(*)]', Comment.Multiline),
        ],
    }
+
+
class OcamlLexer(RegexLexer):
    """
    For the OCaml language.

    .. versionadded:: 0.7
    """

    name = 'OCaml'
    url = 'https://ocaml.org/'
    aliases = ['ocaml']
    filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
    mimetypes = ['text/x-ocaml']

    keywords = (
        'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
        'downto', 'else', 'end', 'exception', 'external', 'false',
        'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
        'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
        'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
        'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
        'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
    )
    # Regex-escaped operator/punctuation tokens.
    keyopts = (
        '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
        r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
        '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
        r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~'
    )

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')

    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbr]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name.Class),
            (r'\(\*(?![)])', Comment, 'comment'),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),

            (r"[^\W\d][\w']*", Name),

            # A float literal needs a fractional part or an exponent.  The
            # original pattern used an unescaped '.' (which matched any
            # character, so e.g. "1x2e5" lexed as a float) and required
            # the exponent (so "1.5" never matched and was split into
            # Integer '.' Integer).
            (r'-?\d[\d_]*(\.[\d_]*([eE][+\-]?\d[\d_]*)?|[eE][+\-]?\d[\d_]*)',
             Number.Float),
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),
            (r'\d[\d_]*', Number.Integer),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            (r'"', String.Double, 'string'),

            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'comment': [
            (r'[^(*)]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String.Double),
            include('escape-sequence'),
            (r'\\\n', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name.Class, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            default('#pop'),
        ],
    }
+
+
+class OpaLexer(RegexLexer):
+ """
+ Lexer for the Opa language.
+
+ .. versionadded:: 1.5
+ """
+
+ name = 'Opa'
+ aliases = ['opa']
+ filenames = ['*.opa']
+ mimetypes = ['text/x-opa']
+
+ # most of these aren't strictly keywords
+ # but if you color only real keywords, you might just
+ # as well not color anything
+ keywords = (
+ 'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
+ 'else', 'end', 'external', 'forall', 'function', 'if', 'import',
+ 'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
+ 'type', 'val', 'with', 'xml_parser',
+ )
+
+ # matches both stuff and `stuff`
+ ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
+
+ op_re = r'[.=\-<>,@~%/+?*&^!]'
+ punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
+ # because they are also used for inserts
+
+ tokens = {
+ # copied from the caml lexer, should be adapted
+ 'escape-sequence': [
+ (r'\\[\\"\'ntr}]', String.Escape),
+ (r'\\[0-9]{3}', String.Escape),
+ (r'\\x[0-9a-fA-F]{2}', String.Escape),
+ ],
+
+ # factorizing these rules, because they are inserted many times
+ 'comments': [
+ (r'/\*', Comment, 'nested-comment'),
+ (r'//.*?$', Comment),
+ ],
+ 'comments-and-spaces': [
+ include('comments'),
+ (r'\s+', Text),
+ ],
+
+ 'root': [
+ include('comments-and-spaces'),
+ # keywords
+ (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
+ # directives
+ # we could parse the actual set of directives instead of anything
+ # starting with @, but this is troublesome
+ # because it needs to be adjusted all the time
+ # and assuming we parse only sources that compile, it is useless
+ (r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
+
+ # number literals
+ (r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
+ (r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
+ (r'-?\d+[eE][+\-]?\d+', Number.Float),
+ (r'0[xX][\da-fA-F]+', Number.Hex),
+ (r'0[oO][0-7]+', Number.Oct),
+ (r'0[bB][01]+', Number.Bin),
+ (r'\d+', Number.Integer),
+ # color literals
+ (r'#[\da-fA-F]{3,6}', Number.Integer),
+
+ # string literals
+ (r'"', String.Double, 'string'),
+ # char literal, should be checked because this is the regexp from
+ # the caml lexer
+ (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
+ String.Char),
+
+ # this is meant to deal with embedded exprs in strings
+ # every time we find a '}' we pop a state so that if we were
+ # inside a string, we are back in the string state
+ # as a consequence, we must also push a state every time we find a
+ # '{' or else we will have errors when parsing {} for instance
+ (r'\{', Operator, '#push'),
+ (r'\}', Operator, '#pop'),
+
+ # html literals
+ # this is a much more strict that the actual parser,
+ # since a<b would not be parsed as html
+ # but then again, the parser is way too lax, and we can't hope
+ # to have something as tolerant
+ (r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
+
+ # db path
+ # matching the '[_]' in '/a[_]' because it is a part
+ # of the syntax of the db path definition
+ # unfortunately, i don't know how to match the ']' in
+ # /a[1], so this is somewhat inconsistent
+ (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
+ # putting the same color on <- as on db path, since
+ # it can be used only to mean Db.write
+ (r'<-(?!'+op_re+r')', Name.Variable),
+
+ # 'modules'
+ # although modules are not distinguished by their names as in caml
+ # the standard library seems to follow the convention that modules
+ # only area capitalized
+ (r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
+
+ # operators
+ # = has a special role because this is the only
+ # way to syntactic distinguish binding constructions
+ # unfortunately, this colors the equal in {x=2} too
+ (r'=(?!'+op_re+r')', Keyword),
+ (r'(%s)+' % op_re, Operator),
+ (r'(%s)+' % punc_re, Operator),
+
+ # coercions
+ (r':', Operator, 'type'),
+ # type variables
+ # we need this rule because we don't parse specially type
+ # definitions so in "type t('a) = ...", "'a" is parsed by 'root'
+ ("'"+ident_re, Keyword.Type),
+
+ # id literal, #something, or #{expr}
+ (r'#'+ident_re, String.Single),
+ (r'#(?=\{)', String.Single),
+
+ # identifiers
+ # this avoids to color '2' in 'a2' as an integer
+ (ident_re, Text),
+
+ # default, not sure if that is needed or not
+ # (r'.', Text),
+ ],
+
+ # it is quite painful to have to parse types to know where they end
+ # this is the general rule for a type
+ # a type is either:
+ # * -> ty
+ # * type-with-slash
+ # * type-with-slash -> ty
+ # * type-with-slash (, type-with-slash)+ -> ty
+ #
+ # the code is pretty funky in here, but this code would roughly
+ # translate in caml to:
+ # let rec type stream =
+ # match stream with
+ # | [< "->"; stream >] -> type stream
+ # | [< ""; stream >] ->
+ # type_with_slash stream
+ # type_lhs_1 stream;
+ # and type_1 stream = ...
+ 'type': [
+ include('comments-and-spaces'),
+ (r'->', Keyword.Type),
+ default(('#pop', 'type-lhs-1', 'type-with-slash')),
+ ],
+
+ # parses all the atomic or closed constructions in the syntax of type
+ # expressions: record types, tuple types, type constructors, basic type
+ # and type variables
+ 'type-1': [
+ include('comments-and-spaces'),
+ (r'\(', Keyword.Type, ('#pop', 'type-tuple')),
+ (r'~?\{', Keyword.Type, ('#pop', 'type-record')),
+ (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
+ (ident_re, Keyword.Type, '#pop'),
+ ("'"+ident_re, Keyword.Type),
+ # this case is not in the syntax but sometimes
+ # we think we are parsing types when in fact we are parsing
+ # some css, so we just pop the states until we get back into
+ # the root state
+ default('#pop'),
+ ],
+
+ # type-with-slash is either:
+ # * type-1
+ # * type-1 (/ type-1)+
+ 'type-with-slash': [
+ include('comments-and-spaces'),
+ default(('#pop', 'slash-type-1', 'type-1')),
+ ],
+ 'slash-type-1': [
+ include('comments-and-spaces'),
+ ('/', Keyword.Type, ('#pop', 'type-1')),
+ # same remark as above
+ default('#pop'),
+ ],
+
+ # we go in this state after having parsed a type-with-slash
+ # while trying to parse a type
+ # and at this point we must determine if we are parsing an arrow
+ # type (in which case we must continue parsing) or not (in which
+ # case we stop)
+ 'type-lhs-1': [
+ include('comments-and-spaces'),
+ (r'->', Keyword.Type, ('#pop', 'type')),
+ (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
+ default('#pop'),
+ ],
+ 'type-arrow': [
+ include('comments-and-spaces'),
+ # the look ahead here allows to parse f(x : int, y : float -> truc)
+ # correctly
+ (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
+ (r'->', Keyword.Type, ('#pop', 'type')),
+ # same remark as above
+ default('#pop'),
+ ],
+
+ # no need to do precise parsing for tuples and records
+ # because they are closed constructions, so we can simply
+ # find the closing delimiter
+ # note that this function would be not work if the source
+ # contained identifiers like `{)` (although it could be patched
+ # to support it)
+ 'type-tuple': [
+ include('comments-and-spaces'),
+ (r'[^()/*]+', Keyword.Type),
+ (r'[/*]', Keyword.Type),
+ (r'\(', Keyword.Type, '#push'),
+ (r'\)', Keyword.Type, '#pop'),
+ ],
+ 'type-record': [
+ include('comments-and-spaces'),
+ (r'[^{}/*]+', Keyword.Type),
+ (r'[/*]', Keyword.Type),
+ (r'\{', Keyword.Type, '#push'),
+ (r'\}', Keyword.Type, '#pop'),
+ ],
+
+ # 'type-tuple': [
+ # include('comments-and-spaces'),
+ # (r'\)', Keyword.Type, '#pop'),
+ # default(('#pop', 'type-tuple-1', 'type-1')),
+ # ],
+ # 'type-tuple-1': [
+ # include('comments-and-spaces'),
+ # (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
+ # (r',', Keyword.Type, 'type-1'),
+ # ],
+ # 'type-record':[
+ # include('comments-and-spaces'),
+ # (r'\}', Keyword.Type, '#pop'),
+ # (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
+ # ],
+ # 'type-record-field-expr': [
+ #
+ # ],
+
+ 'nested-comment': [
+ (r'[^/*]+', Comment),
+ (r'/\*', Comment, '#push'),
+ (r'\*/', Comment, '#pop'),
+ (r'[/*]', Comment),
+ ],
+
+ # the copy pasting between string and single-string
+ # is kinda sad. Is there a way to avoid that??
+ 'string': [
+ (r'[^\\"{]+', String.Double),
+ (r'"', String.Double, '#pop'),
+ (r'\{', Operator, 'root'),
+ include('escape-sequence'),
+ ],
+ 'single-string': [
+ (r'[^\\\'{]+', String.Double),
+ (r'\'', String.Double, '#pop'),
+ (r'\{', Operator, 'root'),
+ include('escape-sequence'),
+ ],
+
+ # all the html stuff
+ # can't really reuse some existing html parser
+ # because we must be able to parse embedded expressions
+
+ # we are in this state after someone parsed the '<' that
+ # started the html literal
+ 'html-open-tag': [
+ (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
+ (r'>', String.Single, ('#pop', 'html-content')),
+ ],
+
+ # we are in this state after someone parsed the '</' that
+ # started the end of the closing tag
+ 'html-end-tag': [
+ # this is a star, because </> is allowed
+ (r'[\w\-:]*>', String.Single, '#pop'),
+ ],
+
+ # we are in this state after having parsed '<ident(:ident)?'
+ # we thus parse a possibly empty list of attributes
+ 'html-attr': [
+ (r'\s+', Text),
+ (r'[\w\-:]+=', String.Single, 'html-attr-value'),
+ (r'/>', String.Single, '#pop'),
+ (r'>', String.Single, ('#pop', 'html-content')),
+ ],
+
+ 'html-attr-value': [
+ (r"'", String.Single, ('#pop', 'single-string')),
+ (r'"', String.Single, ('#pop', 'string')),
+ (r'#'+ident_re, String.Single, '#pop'),
+ (r'#(?=\{)', String.Single, ('#pop', 'root')),
+ (r'[^"\'{`=<>]+', String.Single, '#pop'),
+ (r'\{', Operator, ('#pop', 'root')), # this is a tail call!
+ ],
+
+ # we should probably deal with '\' escapes here
+ 'html-content': [
+ (r'<!--', Comment, 'html-comment'),
+ (r'</', String.Single, ('#pop', 'html-end-tag')),
+ (r'<', String.Single, 'html-open-tag'),
+ (r'\{', Operator, 'root'),
+ (r'[^<{]+', String.Single),
+ ],
+
+ 'html-comment': [
+ (r'-->', Comment, '#pop'),
+ (r'[^\-]+|-', Comment),
+ ],
+ }
+
+
+class ReasonLexer(RegexLexer):
+ """
+ For the ReasonML language.
+
+ .. versionadded:: 2.6
+ """
+
+ name = 'ReasonML'
+ url = 'https://reasonml.github.io/'
+ aliases = ['reasonml', 'reason']
+ filenames = ['*.re', '*.rei']
+ mimetypes = ['text/x-reasonml']
+
+ keywords = (
+ 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', 'downto',
+ 'else', 'end', 'exception', 'external', 'false', 'for', 'fun', 'esfun',
+ 'function', 'functor', 'if', 'in', 'include', 'inherit', 'initializer', 'lazy',
+ 'let', 'switch', 'module', 'pub', 'mutable', 'new', 'nonrec', 'object', 'of',
+ 'open', 'pri', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
+ 'type', 'val', 'virtual', 'when', 'while', 'with',
+ )
+ keyopts = (
+ '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
+ r'-\.', '=>', r'\.', r'\.\.', r'\.\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
+ '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
+ r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|\|', r'\|]', r'\}', '~'
+ )
+
+ operators = r'[!$%&*+\./:<=>?@^|~-]'
+ word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lsr', 'lxor', 'mod', 'or')
+ prefix_syms = r'[!?~]'
+ infix_syms = r'[=<>@^|&+\*/$%-]'
+ primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
+
+ tokens = {
+ 'escape-sequence': [
+ (r'\\[\\"\'ntbr]', String.Escape),
+ (r'\\[0-9]{3}', String.Escape),
+ (r'\\x[0-9a-fA-F]{2}', String.Escape),
+ ],
+ 'root': [
+ (r'\s+', Text),
+ (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
+ (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
+ (r'\b([A-Z][\w\']*)', Name.Class),
+ (r'//.*?\n', Comment.Single),
+ (r'\/\*(?!/)', Comment.Multiline, 'comment'),
+ (r'\b(%s)\b' % '|'.join(keywords), Keyword),
+ (r'(%s)' % '|'.join(keyopts[::-1]), Operator.Word),
+ (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
+ (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
+ (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
+
+ (r"[^\W\d][\w']*", Name),
+
+ (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+ (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
+ (r'0[oO][0-7][0-7_]*', Number.Oct),
+ (r'0[bB][01][01_]*', Number.Bin),
+ (r'\d[\d_]*', Number.Integer),
+
+ (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+ String.Char),
+ (r"'.'", String.Char),
+ (r"'", Keyword),
+
+ (r'"', String.Double, 'string'),
+
+ (r'[~?][a-z][\w\']*:', Name.Variable),
+ ],
+ 'comment': [
+ (r'[^/*]+', Comment.Multiline),
+ (r'\/\*', Comment.Multiline, '#push'),
+ (r'\*\/', Comment.Multiline, '#pop'),
+ (r'\*', Comment.Multiline),
+ ],
+ 'string': [
+ (r'[^\\"]+', String.Double),
+ include('escape-sequence'),
+ (r'\\\n', String.Double),
+ (r'"', String.Double, '#pop'),
+ ],
+ 'dotted': [
+ (r'\s+', Text),
+ (r'\.', Punctuation),
+ (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
+ (r'[A-Z][\w\']*', Name.Class, '#pop'),
+ (r'[a-z_][\w\']*', Name, '#pop'),
+ default('#pop'),
+ ],
+ }
+
+
+class FStarLexer(RegexLexer):
+ """
+ For the F* language.
+ .. versionadded:: 2.7
+ """
+
+ name = 'FStar'
+ url = 'https://www.fstar-lang.org/'
+ aliases = ['fstar']
+ filenames = ['*.fst', '*.fsti']
+ mimetypes = ['text/x-fstar']
+
+ keywords = (
+ 'abstract', 'attributes', 'noeq', 'unopteq', 'and'
+ 'begin', 'by', 'default', 'effect', 'else', 'end', 'ensures',
+ 'exception', 'exists', 'false', 'forall', 'fun', 'function', 'if',
+ 'in', 'include', 'inline', 'inline_for_extraction', 'irreducible',
+ 'logic', 'match', 'module', 'mutable', 'new', 'new_effect', 'noextract',
+ 'of', 'open', 'opaque', 'private', 'range_of', 'reifiable',
+ 'reify', 'reflectable', 'requires', 'set_range_of', 'sub_effect',
+ 'synth', 'then', 'total', 'true', 'try', 'type', 'unfold', 'unfoldable',
+ 'val', 'when', 'with', 'not'
+ )
+ decl_keywords = ('let', 'rec')
+ assume_keywords = ('assume', 'admit', 'assert', 'calc')
+ keyopts = (
+ r'~', r'-', r'/\\', r'\\/', r'<:', r'<@', r'\(\|', r'\|\)', r'#', r'u#',
+ r'&', r'\(', r'\)', r'\(\)', r',', r'~>', r'->', r'<-', r'<--', r'<==>',
+ r'==>', r'\.', r'\?', r'\?\.', r'\.\[', r'\.\(', r'\.\(\|', r'\.\[\|',
+ r'\{:pattern', r':', r'::', r':=', r';', r';;', r'=', r'%\[', r'!\{',
+ r'\[', r'\[@', r'\[\|', r'\|>', r'\]', r'\|\]', r'\{', r'\|', r'\}', r'\$'
+ )
+
+ operators = r'[!$%&*+\./:<=>?@^|~-]'
+ prefix_syms = r'[!?~]'
+ infix_syms = r'[=<>@^|&+\*/$%-]'
+ primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
+
+ tokens = {
+ 'escape-sequence': [
+ (r'\\[\\"\'ntbr]', String.Escape),
+ (r'\\[0-9]{3}', String.Escape),
+ (r'\\x[0-9a-fA-F]{2}', String.Escape),
+ ],
+ 'root': [
+ (r'\s+', Text),
+ (r'false|true|False|True|\(\)|\[\]', Name.Builtin.Pseudo),
+ (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
+ (r'\b([A-Z][\w\']*)', Name.Class),
+ (r'\(\*(?![)])', Comment, 'comment'),
+ (r'\/\/.+$', Comment),
+ (r'\b(%s)\b' % '|'.join(keywords), Keyword),
+ (r'\b(%s)\b' % '|'.join(assume_keywords), Name.Exception),
+ (r'\b(%s)\b' % '|'.join(decl_keywords), Keyword.Declaration),
+ (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
+ (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
+ (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
+
+ (r"[^\W\d][\w']*", Name),
+
+ (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+ (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
+ (r'0[oO][0-7][0-7_]*', Number.Oct),
+ (r'0[bB][01][01_]*', Number.Bin),
+ (r'\d[\d_]*', Number.Integer),
+
+ (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+ String.Char),
+ (r"'.'", String.Char),
+ (r"'", Keyword), # a stray quote is another syntax element
+ (r"\`([\w\'.]+)\`", Operator.Word), # for infix applications
+ (r"\`", Keyword), # for quoting
+ (r'"', String.Double, 'string'),
+
+ (r'[~?][a-z][\w\']*:', Name.Variable),
+ ],
+ 'comment': [
+ (r'[^(*)]+', Comment),
+ (r'\(\*', Comment, '#push'),
+ (r'\*\)', Comment, '#pop'),
+ (r'[(*)]', Comment),
+ ],
+ 'string': [
+ (r'[^\\"]+', String.Double),
+ include('escape-sequence'),
+ (r'\\\n', String.Double),
+ (r'"', String.Double, '#pop'),
+ ],
+ 'dotted': [
+ (r'\s+', Text),
+ (r'\.', Punctuation),
+ (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
+ (r'[A-Z][\w\']*', Name.Class, '#pop'),
+ (r'[a-z_][\w\']*', Name, '#pop'),
+ default('#pop'),
+ ],
+ }
diff --git a/pygments/lexers/modeling.py b/pygments/lexers/modeling.py
new file mode 100644
index 0000000..56448f5
--- /dev/null
+++ b/pygments/lexers/modeling.py
@@ -0,0 +1,369 @@
+"""
+ pygments.lexers.modeling
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for modeling languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+from pygments.lexers.html import HtmlLexer
+from pygments.lexers import _stan_builtins
+
+__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
+
+
+class ModelicaLexer(RegexLexer):
+    """
+    For Modelica source code.
+
+    .. versionadded:: 1.1
+    """
+    name = 'Modelica'
+    url = 'http://www.modelica.org/'
+    aliases = ['modelica']
+    filenames = ['*.mo']
+    mimetypes = ['text/x-modelica']
+
+    # DOTALL lets /* ... */ comments and <html> strings span lines.
+    flags = re.DOTALL | re.MULTILINE
+
+    # A Modelica identifier: a quoted name ('...' with backslash escapes)
+    # or a plain alphanumeric identifier.
+    _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
+
+    tokens = {
+        'whitespace': [
+            # \ufeff: tolerate a stray byte-order mark in the input
+            (r'[\s\ufeff]+', Text),
+            (r'//[^\n]*\n?', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline)
+        ],
+        'root': [
+            include('whitespace'),
+            (r'"', String.Double, 'string'),
+            (r'[()\[\]{},;]+', Punctuation),
+            # element-wise (.*, .^, ...) and plain operators, comparisons
+            (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
+            (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
+            (r'\d+', Number.Integer),
+            # built-in functions and predefined types of the language spec
+            (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
+             r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
+             r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
+             r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
+             r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
+             r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
+             r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
+             r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
+             r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
+             r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
+             r'transpose|vector|zeros)\b', Name.Builtin),
+            # reserved words that do not introduce a named class-like entity
+            (r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
+             r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
+             r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|'
+             r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|'
+             r'output|parameter|partial|protected|public|pure|redeclare|'
+             r'replaceable|return|stream|then|when|while)\b',
+             Keyword.Reserved),
+            (r'(and|not|or)\b', Operator.Word),
+            # class-like keywords push 'class' to pick up the declared name
+            (r'(block|class|connector|end|function|model|operator|package|'
+             r'record|type)\b', Keyword.Reserved, 'class'),
+            (r'(false|true)\b', Keyword.Constant),
+            (r'within\b', Keyword.Reserved, 'package-prefix'),
+            (_name, Name)
+        ],
+        # after a class-like keyword: highlight the declared name (if any)
+        'class': [
+            include('whitespace'),
+            # 'function'/'record' may qualify another class keyword (e.g.
+            # 'operator record'); stay in this state until the name appears
+            (r'(function|record)\b', Keyword.Reserved),
+            # 'end if'/'end for'/... terminate statements, not classes
+            (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
+            (_name, Name.Class, '#pop'),
+            default('#pop')
+        ],
+        # after 'within': the enclosing package path
+        'package-prefix': [
+            include('whitespace'),
+            (_name, Name.Namespace, '#pop'),
+            default('#pop')
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'\\[\'"?\\abfnrtv]', String.Escape),
+            # embedded <html>...</html> (common in annotations) is
+            # delegated to the HTML lexer
+            (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
+             using(HtmlLexer)),
+            (r'<|\\?[^"\\<]+', String.Double)
+        ]
+    }
+
+
+class BugsLexer(RegexLexer):
+    """
+    Pygments Lexer for OpenBugs and WinBugs
+    models.
+
+    .. versionadded:: 1.6
+    """
+
+    name = 'BUGS'
+    aliases = ['bugs', 'winbugs', 'openbugs']
+    filenames = ['*.bug']
+
+    _FUNCTIONS = (
+        # Scalar functions
+        'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
+        'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
+        'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
+        'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
+        'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
+        'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
+        'trunc',
+        # Vector functions
+        'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
+        'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
+        'sd', 'sort', 'sum',
+        # Special
+        'D', 'I', 'F', 'T', 'C')
+    """ OpenBUGS built-in functions
+
+    From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
+
+    This also includes
+
+    - T, C, I : Truncation and censoring.
+      ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
+    - D : ODE
+    - F : Functional http://www.openbugs.info/Examples/Functionals.html
+
+    """
+
+    _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
+                      'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
+                      'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
+                      'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
+                      'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
+                      'dmt', 'dwish')
+    """ OpenBUGS built-in distributions
+
+    Functions from
+    http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
+    """
+
+    tokens = {
+        'whitespace': [
+            (r"\s+", Text),
+        ],
+        'comments': [
+            # Comments
+            (r'#.*$', Comment.Single),
+        ],
+        'root': [
+            # Comments
+            include('comments'),
+            include('whitespace'),
+            # Block start
+            (r'(model)(\s+)(\{)',
+             bygroups(Keyword.Namespace, Text, Punctuation)),
+            # Reserved Words
+            # (?![\w.]) instead of \b because '.' is valid in BUGS names
+            (r'(for|in)(?![\w.])', Keyword.Reserved),
+            # Built-in Functions
+            # lookahead for '(' so a function name used as a plain
+            # variable is not highlighted as a builtin
+            (r'(%s)(?=\s*\()'
+             % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
+             Name.Builtin),
+            # Regular variable names
+            (r'[A-Za-z][\w.]*', Name),
+            # Number Literals
+            (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
+            # Punctuation
+            (r'\[|\]|\(|\)|:|,|;', Punctuation),
+            # Assignment operators
+            # SLexer makes these tokens Operators.
+            (r'<-|~', Operator),
+            # Infix and prefix operators
+            (r'\+|-|\*|/', Operator),
+            # Block
+            (r'[{}]', Punctuation),
+        ]
+    }
+
+    # NOTE: pygments' lexer machinery calls analyse_text without an
+    # instance, hence no 'self' parameter.
+    def analyse_text(text):
+        # A BUGS model file is expected to open with a 'model {' block.
+        if re.search(r"^\s*model\s*{", text, re.M):
+            return 0.7
+        else:
+            return 0.0
+
+
+class JagsLexer(RegexLexer):
+ """
+ Pygments Lexer for JAGS.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'JAGS'
+ aliases = ['jags']
+ filenames = ['*.jag', '*.bug']
+
+ # JAGS
+ _FUNCTIONS = (
+ 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
+ 'cos', 'cosh', 'cloglog',
+ 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
+ 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
+ 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
+ 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
+ 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
+ # Truncation/Censoring (should I include)
+ 'T', 'I')
+ # Distributions with density, probability and quartile functions
+ _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
+ ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
+ 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
+ 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
+ # Other distributions without density and probability
+ _OTHER_DISTRIBUTIONS = (
+ 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
+ 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
+ 'dnbinom', 'dweibull', 'ddirich')
+
+ tokens = {
+ 'whitespace': [
+ (r"\s+", Text),
+ ],
+ 'names': [
+ # Regular variable names
+ (r'[a-zA-Z][\w.]*\b', Name),
+ ],
+ 'comments': [
+ # do not use stateful comments
+ (r'(?s)/\*.*?\*/', Comment.Multiline),
+ # Comments
+ (r'#.*$', Comment.Single),
+ ],
+ 'root': [
+ # Comments
+ include('comments'),
+ include('whitespace'),
+ # Block start
+ (r'(model|data)(\s+)(\{)',
+ bygroups(Keyword.Namespace, Text, Punctuation)),
+ (r'var(?![\w.])', Keyword.Declaration),
+ # Reserved Words
+ (r'(for|in)(?![\w.])', Keyword.Reserved),
+ # Builtins
+ # Need to use lookahead because . is a valid char
+ (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ + _DISTRIBUTIONS
+ + _OTHER_DISTRIBUTIONS),
+ Name.Builtin),
+ # Names
+ include('names'),
+ # Number Literals
+ (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
+ (r'\[|\]|\(|\)|:|,|;', Punctuation),
+ # Assignment operators
+ (r'<-|~', Operator),
+ # # JAGS includes many more than OpenBUGS
+ (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator),
+ (r'[{}]', Punctuation),
+ ]
+ }
+
+ def analyse_text(text):
+ if re.search(r'^\s*model\s*\{', text, re.M):
+ if re.search(r'^\s*data\s*\{', text, re.M):
+ return 0.9
+ elif re.search(r'^\s*var', text, re.M):
+ return 0.9
+ else:
+ return 0.3
+ else:
+ return 0
+
+
+class StanLexer(RegexLexer):
+ """Pygments Lexer for Stan models.
+
+ The Stan modeling language is specified in the *Stan Modeling Language
+ User's Guide and Reference Manual, v2.17.0*,
+ `pdf <https://github.com/stan-dev/stan/releases/download/v2.17.0/stan-reference-2.17.0.pdf>`__.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'Stan'
+ aliases = ['stan']
+ filenames = ['*.stan']
+
+ tokens = {
+ 'whitespace': [
+ (r"\s+", Text),
+ ],
+ 'comments': [
+ (r'(?s)/\*.*?\*/', Comment.Multiline),
+ # Comments
+ (r'(//|#).*$', Comment.Single),
+ ],
+ 'root': [
+ (r'"[^"]*"', String),
+ # Comments
+ include('comments'),
+ # block start
+ include('whitespace'),
+ # Block start
+ (r'(%s)(\s*)(\{)' %
+ r'|'.join(('functions', 'data', r'transformed\s+?data',
+ 'parameters', r'transformed\s+parameters',
+ 'model', r'generated\s+quantities')),
+ bygroups(Keyword.Namespace, Text, Punctuation)),
+ # target keyword
+ (r'target\s*\+=', Keyword),
+ # Reserved Words
+ (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
+ # Truncation
+ (r'T(?=\s*\[)', Keyword),
+ # Data types
+ (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
+ # < should be punctuation, but elsewhere I can't tell if it is in
+ # a range constraint
+ (r'(<)(\s*)(upper|lower|offset|multiplier)(\s*)(=)',
+ bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)),
+ (r'(,)(\s*)(upper)(\s*)(=)',
+ bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)),
+ # Punctuation
+ (r"[;,\[\]()]", Punctuation),
+ # Builtin
+ (r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin),
+ (r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS),
+ bygroups(Operator, Whitespace, Name.Builtin)),
+ # Special names ending in __, like lp__
+ (r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
+ (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
+ # user-defined functions
+ (r'[A-Za-z]\w*(?=\s*\()]', Name.Function),
+ # Imaginary Literals
+ (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?i', Number.Float),
+ (r'\.[0-9]+([eE][+-]?[0-9]+)?i', Number.Float),
+ (r'[0-9]+i', Number.Float),
+ # Real Literals
+ (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float),
+ (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float),
+ # Integer Literals
+ (r'[0-9]+', Number.Integer),
+ # Regular variable names
+ (r'[A-Za-z]\w*\b', Name),
+ # Assignment operators
+ (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator),
+ # Infix, prefix and postfix operators (and = )
+ (r"\+|-|\.?\*|\.?/|\\|'|\.?\^|!=?|<=?|>=?|\|\||&&|%|\?|:|%/%|!", Operator),
+ # Block delimiters
+ (r'[{}]', Punctuation),
+ # Distribution |
+ (r'\|', Punctuation)
+ ]
+ }
+
+ def analyse_text(text):
+ if re.search(r'^\s*parameters\s*\{', text, re.M):
+ return 1.0
+ else:
+ return 0.0
diff --git a/pygments/lexers/modula2.py b/pygments/lexers/modula2.py
new file mode 100644
index 0000000..a94d486
--- /dev/null
+++ b/pygments/lexers/modula2.py
@@ -0,0 +1,1580 @@
+"""
+ pygments.lexers.modula2
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Multi-Dialect Lexer for Modula-2.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.util import get_bool_opt, get_list_opt
+from pygments.token import Text, Comment, Operator, Keyword, Name, \
+ String, Number, Punctuation, Error
+
+__all__ = ['Modula2Lexer']
+
+
+# Multi-Dialect Modula-2 Lexer
+class Modula2Lexer(RegexLexer):
+ """
+ For Modula-2 source code.
+
+ The Modula-2 lexer supports several dialects. By default, it operates in
+ fallback mode, recognising the *combined* literals, punctuation symbols
+ and operators of all supported dialects, and the *combined* reserved words
+ and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
+ differentiating between library defined identifiers.
+
+ To select a specific dialect, a dialect option may be passed
+ or a dialect tag may be embedded into a source file.
+
+ Dialect Options:
+
+ `m2pim`
+ Select PIM Modula-2 dialect.
+ `m2iso`
+ Select ISO Modula-2 dialect.
+ `m2r10`
+ Select Modula-2 R10 dialect.
+ `objm2`
+ Select Objective Modula-2 dialect.
+
+ The PIM and ISO dialect options may be qualified with a language extension.
+
+ Language Extensions:
+
+ `+aglet`
+ Select Aglet Modula-2 extensions, available with m2iso.
+ `+gm2`
+ Select GNU Modula-2 extensions, available with m2pim.
+ `+p1`
+ Select p1 Modula-2 extensions, available with m2iso.
+ `+xds`
+ Select XDS Modula-2 extensions, available with m2iso.
+
+
+ Passing a Dialect Option via Unix Commandline Interface
+
+ Dialect options may be passed to the lexer using the `dialect` key.
+ Only one such option should be passed. If multiple dialect options are
+ passed, the first valid option is used, any subsequent options are ignored.
+
+ Examples:
+
+ `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
+ Use ISO dialect to render input to HTML output
+ `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
+ Use ISO dialect with p1 extensions to render input to RTF output
+
+
+ Embedding a Dialect Option within a source file
+
+ A dialect option may be embedded in a source file in form of a dialect
+ tag, a specially formatted comment that specifies a dialect option.
+
+ Dialect Tag EBNF::
+
+ dialectTag :
+ OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
+
+ dialectOption :
+ 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
+ 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
+
+ Prefix : '!' ;
+
+ OpeningCommentDelim : '(*' ;
+
+ ClosingCommentDelim : '*)' ;
+
+ No whitespace is permitted between the tokens of a dialect tag.
+
+ In the event that a source file contains multiple dialect tags, the first
+ tag that contains a valid dialect option will be used and any subsequent
+ dialect tags will be ignored. Ideally, a dialect tag should be placed
+ at the beginning of a source file.
+
+ An embedded dialect tag overrides a dialect option set via command line.
+
+ Examples:
+
+ ``(*!m2r10*) DEFINITION MODULE Foobar; ...``
+ Use Modula2 R10 dialect to render this source file.
+ ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
+ Use PIM dialect with GNU extensions to render this source file.
+
+
+ Algol Publication Mode:
+
+ In Algol publication mode, source text is rendered for publication of
+ algorithms in scientific papers and academic texts, following the format
+ of the Revised Algol-60 Language Report. It is activated by passing
+ one of two corresponding styles as an option:
+
+ `algol`
+ render reserved words lowercase underline boldface
+ and builtins lowercase boldface italic
+ `algol_nu`
+ render reserved words lowercase boldface (no underlining)
+ and builtins lowercase boldface italic
+
+ The lexer automatically performs the required lowercase conversion when
+ this mode is activated.
+
+ Example:
+
+ ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
+ Render input file in Algol publication mode to LaTeX output.
+
+
+ Rendering Mode of First Class ADT Identifiers:
+
+ The rendering of standard library first class ADT identifiers is controlled
+ by option flag "treat_stdlib_adts_as_builtins".
+
+ When this option is turned on, standard library ADT identifiers are rendered
+ as builtins. When it is turned off, they are rendered as ordinary library
+ identifiers.
+
+ `treat_stdlib_adts_as_builtins` (default: On)
+
+ The option is useful for dialects that support ADTs as first class objects
+ and provide ADTs in the standard library that would otherwise be built-in.
+
+ At present, only Modula-2 R10 supports library ADTs as first class objects
+ and therefore, no ADT identifiers are defined for any other dialects.
+
+ Example:
+
+ ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
+ Render standard library ADTs as ordinary library types.
+
+ .. versionadded:: 1.3
+
+ .. versionchanged:: 2.1
+ Added multi-dialect support.
+ """
+ name = 'Modula-2'
+ url = 'http://www.modula2.org/'
+ aliases = ['modula2', 'm2']
+ filenames = ['*.def', '*.mod']
+ mimetypes = ['text/x-modula2']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'whitespace': [
+ (r'\n+', Text), # blank lines
+ (r'\s+', Text), # whitespace
+ ],
+ 'dialecttags': [
+ # PIM Dialect Tag
+ (r'\(\*!m2pim\*\)', Comment.Special),
+ # ISO Dialect Tag
+ (r'\(\*!m2iso\*\)', Comment.Special),
+ # M2R10 Dialect Tag
+ (r'\(\*!m2r10\*\)', Comment.Special),
+ # ObjM2 Dialect Tag
+ (r'\(\*!objm2\*\)', Comment.Special),
+ # Aglet Extensions Dialect Tag
+ (r'\(\*!m2iso\+aglet\*\)', Comment.Special),
+ # GNU Extensions Dialect Tag
+ (r'\(\*!m2pim\+gm2\*\)', Comment.Special),
+ # p1 Extensions Dialect Tag
+ (r'\(\*!m2iso\+p1\*\)', Comment.Special),
+ # XDS Extensions Dialect Tag
+ (r'\(\*!m2iso\+xds\*\)', Comment.Special),
+ ],
+ 'identifiers': [
+ (r'([a-zA-Z_$][\w$]*)', Name),
+ ],
+ 'prefixed_number_literals': [
+ #
+ # Base-2, whole number
+ (r'0b[01]+(\'[01]+)*', Number.Bin),
+ #
+ # Base-16, whole number
+ (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
+ ],
+ 'plain_number_literals': [
+ #
+ # Base-10, real number with exponent
+ (r'[0-9]+(\'[0-9]+)*' # integral part
+ r'\.[0-9]+(\'[0-9]+)*' # fractional part
+ r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent
+ Number.Float),
+ #
+ # Base-10, real number without exponent
+ (r'[0-9]+(\'[0-9]+)*' # integral part
+ r'\.[0-9]+(\'[0-9]+)*', # fractional part
+ Number.Float),
+ #
+ # Base-10, whole number
+ (r'[0-9]+(\'[0-9]+)*', Number.Integer),
+ ],
+ 'suffixed_number_literals': [
+ #
+ # Base-8, whole number
+ (r'[0-7]+B', Number.Oct),
+ #
+ # Base-8, character code
+ (r'[0-7]+C', Number.Oct),
+ #
+ # Base-16, number
+ (r'[0-9A-F]+H', Number.Hex),
+ ],
+ 'string_literals': [
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ ],
+ 'digraph_operators': [
+ # Dot Product Operator
+ (r'\*\.', Operator),
+ # Array Concatenation Operator
+ (r'\+>', Operator), # M2R10 + ObjM2
+ # Inequality Operator
+ (r'<>', Operator), # ISO + PIM
+ # Less-Or-Equal, Subset
+ (r'<=', Operator),
+ # Greater-Or-Equal, Superset
+ (r'>=', Operator),
+ # Identity Operator
+ (r'==', Operator), # M2R10 + ObjM2
+ # Type Conversion Operator
+ (r'::', Operator), # M2R10 + ObjM2
+ # Assignment Symbol
+ (r':=', Operator),
+ # Postfix Increment Mutator
+ (r'\+\+', Operator), # M2R10 + ObjM2
+ # Postfix Decrement Mutator
+ (r'--', Operator), # M2R10 + ObjM2
+ ],
+ 'unigraph_operators': [
+ # Arithmetic Operators
+ (r'[+-]', Operator),
+ (r'[*/]', Operator),
+ # ISO 80000-2 compliant Set Difference Operator
+ (r'\\', Operator), # M2R10 + ObjM2
+ # Relational Operators
+ (r'[=#<>]', Operator),
+ # Dereferencing Operator
+ (r'\^', Operator),
+ # Dereferencing Operator Synonym
+ (r'@', Operator), # ISO
+ # Logical AND Operator Synonym
+ (r'&', Operator), # PIM + ISO
+ # Logical NOT Operator Synonym
+ (r'~', Operator), # PIM + ISO
+ # Smalltalk Message Prefix
+ (r'`', Operator), # ObjM2
+ ],
+ 'digraph_punctuation': [
+ # Range Constructor
+ (r'\.\.', Punctuation),
+ # Opening Chevron Bracket
+ (r'<<', Punctuation), # M2R10 + ISO
+ # Closing Chevron Bracket
+ (r'>>', Punctuation), # M2R10 + ISO
+ # Blueprint Punctuation
+ (r'->', Punctuation), # M2R10 + ISO
+ # Distinguish |# and # in M2 R10
+ (r'\|#', Punctuation),
+ # Distinguish ## and # in M2 R10
+ (r'##', Punctuation),
+ # Distinguish |* and * in M2 R10
+ (r'\|\*', Punctuation),
+ ],
+ 'unigraph_punctuation': [
+ # Common Punctuation
+ (r'[()\[\]{},.:;|]', Punctuation),
+ # Case Label Separator Synonym
+ (r'!', Punctuation), # ISO
+ # Blueprint Punctuation
+ (r'\?', Punctuation), # M2R10 + ObjM2
+ ],
+ 'comments': [
+ # Single Line Comment
+ (r'^//.*?\n', Comment.Single), # M2R10 + ObjM2
+ # Block Comment
+ (r'\(\*([^$].*?)\*\)', Comment.Multiline),
+ # Template Block Comment
+ (r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2
+ ],
+ 'pragmas': [
+ # ISO Style Pragmas
+ (r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2
+ # Pascal Style Pragmas
+ (r'\(\*\$.*?\*\)', Comment.Preproc), # PIM
+ ],
+ 'root': [
+ include('whitespace'),
+ include('dialecttags'),
+ include('pragmas'),
+ include('comments'),
+ include('identifiers'),
+ include('suffixed_number_literals'), # PIM + ISO
+ include('prefixed_number_literals'), # M2R10 + ObjM2
+ include('plain_number_literals'),
+ include('string_literals'),
+ include('digraph_punctuation'),
+ include('digraph_operators'),
+ include('unigraph_punctuation'),
+ include('unigraph_operators'),
+ ]
+ }
+
# C o m m o n   D a t a s e t s

# Common Reserved Words Dataset
# -- reserved words shared by every supported Modula-2 dialect;
#    dialect-specific additions are merged in via reserved_words_db
common_reserved_words = (
    # 37 common reserved words
    'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
    'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
    'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
    'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
    'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
)

# Common Builtins Dataset
# -- predefined (pervasive) identifiers shared by every dialect
common_builtins = (
    # 16 common builtins
    'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
    'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
    'TRUE',
)

# Common Pseudo-Module Builtins Dataset
# -- identifiers exported by pseudo-modules (e.g. SYSTEM) in all dialects
common_pseudo_builtins = (
    # 4 common pseudo builtins
    'ADDRESS', 'BYTE', 'WORD', 'ADR'
)

# P I M   M o d u l a - 2   D a t a s e t s

# Lexemes to Mark as Error Tokens for PIM Modula-2
# -- symbols that are not legal in the PIM dialect
pim_lexemes_to_reject = (
    '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
    '+>', '->', '<<', '>>', '|#', '##',
)

# PIM Modula-2 Additional Reserved Words Dataset
pim_additional_reserved_words = (
    # 3 additional reserved words
    'EXPORT', 'QUALIFIED', 'WITH',
)

# PIM Modula-2 Additional Builtins Dataset
pim_additional_builtins = (
    # 16 additional builtins
    'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
    'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
)

# PIM Modula-2 Additional Pseudo-Module Builtins Dataset
pim_additional_pseudo_builtins = (
    # 5 additional pseudo builtins
    'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
)
+
# I S O   M o d u l a - 2   D a t a s e t s

# Lexemes to Mark as Error Tokens for ISO Modula-2
# -- symbols that are not legal in the ISO dialect
iso_lexemes_to_reject = (
    '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
    '<<', '>>', '|#', '##',
)

# ISO Modula-2 Additional Reserved Words Dataset
iso_additional_reserved_words = (
    # 9 additional reserved words (ISO 10514-1)
    'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
    'REM', 'RETRY', 'WITH',
    # 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
    'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
    'REVEAL', 'TRACED', 'UNSAFEGUARDED',
)

# ISO Modula-2 Additional Builtins Dataset
iso_additional_builtins = (
    # 26 additional builtins (ISO 10514-1)
    # NOTE: fixed the previous misspelling 'UNINTERRUBTIBLE' -- the
    # ISO 10514-1 protection constant is spelled UNINTERRUPTIBLE
    'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
    'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',
    'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
    'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
    # 5 additional builtins (ISO 10514-2 & ISO 10514-3)
    'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
)

# ISO Modula-2 Additional Pseudo-Module Builtins Dataset
iso_additional_pseudo_builtins = (
    # 14 additional builtins (SYSTEM)
    'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
    'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
    'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
    # 13 additional builtins (COROUTINES)
    'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
    'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
    'NEWCOROUTINE', 'PROT', 'TRANSFER',
    # 9 additional builtins (EXCEPTIONS)
    'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
    'ExceptionSource', 'GetMessage', 'IsCurrentSource',
    'IsExceptionalExecution', 'RAISE',
    # 3 additional builtins (TERMINATION)
    'TERMINATION', 'IsTerminating', 'HasHalted',
    # additional builtins (M2EXCEPTION)
    'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
    'indexException', 'rangeException', 'caseSelectException',
    'invalidLocation', 'functionException', 'wholeValueException',
    'wholeDivException', 'realValueException', 'realDivException',
    'complexValueException', 'complexDivException', 'protException',
    'sysException', 'coException', 'exException',
)
+
# M o d u l a - 2   R 1 0   D a t a s e t s

# Lexemes to Mark as Error Tokens for Modula-2 R10
m2r10_lexemes_to_reject = (
    '!', '`', '@', '$', '%', '&', '<>',
)

# Modula-2 R10 reserved words in addition to the common set
m2r10_additional_reserved_words = (
    # 12 additional reserved words
    'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
    'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
    # 2 additional reserved words with symbolic assembly option
    'ASM', 'REG',
)

# Modula-2 R10 builtins in addition to the common set
m2r10_additional_builtins = (
    # 26 additional builtins
    'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
    'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
    'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
    'UNICHAR', 'WRITE', 'WRITEF',
)

# Modula-2 R10 Additional Pseudo-Module Builtins Dataset
m2r10_additional_pseudo_builtins = (
    # 13 additional builtins (TPROPERTIES)
    'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
    'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
    'TMAXEXP', 'TMINEXP',
    # 4 additional builtins (CONVERSION)
    'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
    # 35 additional builtins (UNSAFE)
    'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
    'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
    'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
    'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
    'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
    # 11 additional builtins (ATOMIC)
    'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
    'BWNAND', 'BWOR', 'BWXOR',
    # 7 additional builtins (COMPILER)
    'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
    'HASH',
    # 5 additional builtins (ASSEMBLER)
    'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
)

# O b j e c t i v e   M o d u l a - 2   D a t a s e t s

# Lexemes to Mark as Error Tokens for Objective Modula-2
objm2_lexemes_to_reject = (
    '!', '$', '%', '&', '<>',
)

# Objective Modula-2 Extensions
# reserved words in addition to Modula-2 R10
objm2_additional_reserved_words = (
    # 16 additional reserved words
    'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
    'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
    'SUPER', 'TRY',
)

# Objective Modula-2 Extensions
# builtins in addition to Modula-2 R10
objm2_additional_builtins = (
    # 3 additional builtins
    'OBJECT', 'NO', 'YES',
)

# Objective Modula-2 Extensions
# pseudo-module builtins in addition to Modula-2 R10
# (deliberately empty -- evaluates to the empty tuple)
objm2_additional_pseudo_builtins = (
    # None
)

# A g l e t   M o d u l a - 2   D a t a s e t s

# Aglet Extensions
# reserved words in addition to ISO Modula-2
# (deliberately empty -- evaluates to the empty tuple)
aglet_additional_reserved_words = (
    # None
)

# Aglet Extensions
# builtins in addition to ISO Modula-2
aglet_additional_builtins = (
    # 9 additional builtins
    'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
    'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
)

# Aglet Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
# (deliberately empty -- evaluates to the empty tuple)
aglet_additional_pseudo_builtins = (
    # None
)

# G N U   M o d u l a - 2   D a t a s e t s

# GNU Extensions
# reserved words in addition to PIM Modula-2
gm2_additional_reserved_words = (
    # 10 additional reserved words
    'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
    '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
)

# GNU Extensions
# builtins in addition to PIM Modula-2
gm2_additional_builtins = (
    # 21 additional builtins
    'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
    'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
    'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
    'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
)

# GNU Extensions
# pseudo-module builtins in addition to PIM Modula-2
# (deliberately empty -- evaluates to the empty tuple)
gm2_additional_pseudo_builtins = (
    # None
)

# p 1   M o d u l a - 2   D a t a s e t s

# p1 Extensions
# reserved words in addition to ISO Modula-2
# (deliberately empty -- evaluates to the empty tuple)
p1_additional_reserved_words = (
    # None
)

# p1 Extensions
# builtins in addition to ISO Modula-2
# (deliberately empty -- evaluates to the empty tuple)
p1_additional_builtins = (
    # None
)

# p1 Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
p1_additional_pseudo_builtins = (
    # 1 additional builtin
    'BCD',
)
+
# X D S   M o d u l a - 2   D a t a s e t s

# XDS Extensions
# reserved words in addition to ISO Modula-2
xds_additional_reserved_words = (
    # 1 additional reserved word
    'SEQ',
)

# XDS Extensions
# builtins in addition to ISO Modula-2
xds_additional_builtins = (
    # 9 additional builtins
    'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
    'LONGCARD', 'SHORTCARD', 'SHORTINT',
)

# XDS Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
xds_additional_pseudo_builtins = (
    # 21 additional builtins (SYSTEM)
    'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
    'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
    # NOTE: commas added after 'void' and 'EQUATION' -- the original had
    # 'void' immediately followed by 'COMPILER' on the next code line,
    # which implicit string concatenation silently merged into the bogus
    # entry 'voidCOMPILER', losing both intended identifiers.
    'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void',
    # 3 additional builtins (COMPILER)
    'COMPILER', 'OPTION', 'EQUATION',
)
+
# P I M   S t a n d a r d   L i b r a r y   D a t a s e t s

# PIM Modula-2 Standard Library Modules Dataset
pim_stdlib_module_identifiers = (
    'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
)

# PIM Modula-2 Standard Library Types Dataset
pim_stdlib_type_identifiers = (
    'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
    'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
    'DirectoryCommand',
)

# PIM Modula-2 Standard Library Procedures Dataset
pim_stdlib_proc_identifiers = (
    'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
    'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
    'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
    'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
    'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
    'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
    'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
    'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
    'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
    'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
)

# PIM Modula-2 Standard Library Variables Dataset
pim_stdlib_var_identifiers = (
    'Done', 'termCH', 'in', 'out'
)

# PIM Modula-2 Standard Library Constants Dataset
pim_stdlib_const_identifiers = (
    'EOL',
)

# I S O   S t a n d a r d   L i b r a r y   D a t a s e t s
# NOTE: the ISO stdlib datasets below are placeholders; each evaluates
# to the empty tuple until the identifier lists are filled in.

# ISO Modula-2 Standard Library Modules Dataset
iso_stdlib_module_identifiers = (
    # TO DO
)

# ISO Modula-2 Standard Library Types Dataset
iso_stdlib_type_identifiers = (
    # TO DO
)

# ISO Modula-2 Standard Library Procedures Dataset
iso_stdlib_proc_identifiers = (
    # TO DO
)

# ISO Modula-2 Standard Library Variables Dataset
iso_stdlib_var_identifiers = (
    # TO DO
)

# ISO Modula-2 Standard Library Constants Dataset
iso_stdlib_const_identifiers = (
    # TO DO
)
+
# M 2 R 1 0   S t a n d a r d   L i b r a r y   D a t a s e t s

# Modula-2 R10 Standard Library ADTs Dataset
m2r10_stdlib_adt_identifiers = (
    'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
    'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
    'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
    'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
    'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
    'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
    'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
    'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
    'INT64', 'INT128', 'STRING', 'UNISTRING',
)

# Modula-2 R10 Standard Library Blueprints Dataset
m2r10_stdlib_blueprint_identifiers = (
    'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
    'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
    'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
    'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
    'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
    'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
    'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
)

# Modula-2 R10 Standard Library Modules Dataset
m2r10_stdlib_module_identifiers = (
    'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
    'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
    'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
    'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
    'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
)

# Modula-2 R10 Standard Library Types Dataset
m2r10_stdlib_type_identifiers = (
    'File', 'Status',
    # TO BE COMPLETED
)

# Modula-2 R10 Standard Library Procedures Dataset
m2r10_stdlib_proc_identifiers = (
    'ALLOCATE', 'DEALLOCATE', 'SIZE',
    # TO BE COMPLETED
)

# Modula-2 R10 Standard Library Variables Dataset
m2r10_stdlib_var_identifiers = (
    'stdIn', 'stdOut', 'stdErr',
)

# Modula-2 R10 Standard Library Constants Dataset
m2r10_stdlib_const_identifiers = (
    'pi', 'tau',
)

# D i a l e c t s

# Dialect modes
# NOTE: index 0 must remain the 'unknown' fallback -- __init__ and
# get_dialect_from_dialect_tag rely on slicing it off when validating
# user-supplied dialect identifiers.
dialects = (
    'unknown',
    'm2pim', 'm2iso', 'm2r10', 'objm2',
    'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
)
+
# D a t a b a s e s
# Each database maps a dialect identifier to a tuple of identifier
# datasets; set_dialect() computes the union of those datasets.

# Lexemes to Mark as Errors Database
lexemes_to_reject_db = {
    # Lexemes to reject for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Lexemes to reject for PIM Modula-2
    'm2pim': (
        pim_lexemes_to_reject,
    ),
    # Lexemes to reject for ISO Modula-2
    'm2iso': (
        iso_lexemes_to_reject,
    ),
    # Lexemes to reject for Modula-2 R10
    'm2r10': (
        m2r10_lexemes_to_reject,
    ),
    # Lexemes to reject for Objective Modula-2
    'objm2': (
        objm2_lexemes_to_reject,
    ),
    # Lexemes to reject for Aglet Modula-2
    'm2iso+aglet': (
        iso_lexemes_to_reject,
    ),
    # Lexemes to reject for GNU Modula-2
    'm2pim+gm2': (
        pim_lexemes_to_reject,
    ),
    # Lexemes to reject for p1 Modula-2
    'm2iso+p1': (
        iso_lexemes_to_reject,
    ),
    # Lexemes to reject for XDS Modula-2
    'm2iso+xds': (
        iso_lexemes_to_reject,
    ),
}

# Reserved Words Database
reserved_words_db = {
    # Reserved words for unknown dialect
    'unknown': (
        common_reserved_words,
        pim_additional_reserved_words,
        iso_additional_reserved_words,
        m2r10_additional_reserved_words,
    ),

    # Reserved words for PIM Modula-2
    'm2pim': (
        common_reserved_words,
        pim_additional_reserved_words,
    ),

    # Reserved words for ISO Modula-2
    'm2iso': (
        common_reserved_words,
        iso_additional_reserved_words,
    ),

    # Reserved words for Modula-2 R10
    'm2r10': (
        common_reserved_words,
        m2r10_additional_reserved_words,
    ),

    # Reserved words for Objective Modula-2
    'objm2': (
        common_reserved_words,
        m2r10_additional_reserved_words,
        objm2_additional_reserved_words,
    ),

    # Reserved words for Aglet Modula-2 Extensions
    'm2iso+aglet': (
        common_reserved_words,
        iso_additional_reserved_words,
        aglet_additional_reserved_words,
    ),

    # Reserved words for GNU Modula-2 Extensions
    'm2pim+gm2': (
        common_reserved_words,
        pim_additional_reserved_words,
        gm2_additional_reserved_words,
    ),

    # Reserved words for p1 Modula-2 Extensions
    'm2iso+p1': (
        common_reserved_words,
        iso_additional_reserved_words,
        p1_additional_reserved_words,
    ),

    # Reserved words for XDS Modula-2 Extensions
    'm2iso+xds': (
        common_reserved_words,
        iso_additional_reserved_words,
        xds_additional_reserved_words,
    ),
}
+
# Builtins Database
builtins_db = {
    # Builtins for unknown dialect
    'unknown': (
        common_builtins,
        pim_additional_builtins,
        iso_additional_builtins,
        m2r10_additional_builtins,
    ),

    # Builtins for PIM Modula-2
    'm2pim': (
        common_builtins,
        pim_additional_builtins,
    ),

    # Builtins for ISO Modula-2
    'm2iso': (
        common_builtins,
        iso_additional_builtins,
    ),

    # Builtins for Modula-2 R10
    'm2r10': (
        common_builtins,
        m2r10_additional_builtins,
    ),

    # Builtins for Objective Modula-2
    'objm2': (
        common_builtins,
        m2r10_additional_builtins,
        objm2_additional_builtins,
    ),

    # Builtins for Aglet Modula-2 Extensions
    'm2iso+aglet': (
        common_builtins,
        iso_additional_builtins,
        aglet_additional_builtins,
    ),

    # Builtins for GNU Modula-2 Extensions
    'm2pim+gm2': (
        common_builtins,
        pim_additional_builtins,
        gm2_additional_builtins,
    ),

    # Builtins for p1 Modula-2 Extensions
    'm2iso+p1': (
        common_builtins,
        iso_additional_builtins,
        p1_additional_builtins,
    ),

    # Builtins for XDS Modula-2 Extensions
    'm2iso+xds': (
        common_builtins,
        iso_additional_builtins,
        xds_additional_builtins,
    ),
}

# Pseudo-Module Builtins Database
pseudo_builtins_db = {
    # Builtins for unknown dialect
    'unknown': (
        common_pseudo_builtins,
        pim_additional_pseudo_builtins,
        iso_additional_pseudo_builtins,
        m2r10_additional_pseudo_builtins,
    ),

    # Builtins for PIM Modula-2
    'm2pim': (
        common_pseudo_builtins,
        pim_additional_pseudo_builtins,
    ),

    # Builtins for ISO Modula-2
    'm2iso': (
        common_pseudo_builtins,
        iso_additional_pseudo_builtins,
    ),

    # Builtins for Modula-2 R10
    'm2r10': (
        common_pseudo_builtins,
        m2r10_additional_pseudo_builtins,
    ),

    # Builtins for Objective Modula-2
    'objm2': (
        common_pseudo_builtins,
        m2r10_additional_pseudo_builtins,
        objm2_additional_pseudo_builtins,
    ),

    # Builtins for Aglet Modula-2 Extensions
    'm2iso+aglet': (
        common_pseudo_builtins,
        iso_additional_pseudo_builtins,
        aglet_additional_pseudo_builtins,
    ),

    # Builtins for GNU Modula-2 Extensions
    'm2pim+gm2': (
        common_pseudo_builtins,
        pim_additional_pseudo_builtins,
        gm2_additional_pseudo_builtins,
    ),

    # Builtins for p1 Modula-2 Extensions
    'm2iso+p1': (
        common_pseudo_builtins,
        iso_additional_pseudo_builtins,
        p1_additional_pseudo_builtins,
    ),

    # Builtins for XDS Modula-2 Extensions
    'm2iso+xds': (
        common_pseudo_builtins,
        iso_additional_pseudo_builtins,
        xds_additional_pseudo_builtins,
    ),
}
+
# Standard Library ADTs Database
stdlib_adts_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library ADTs for PIM Modula-2
    'm2pim': (
        # No first class library types
    ),

    # Standard Library ADTs for ISO Modula-2
    'm2iso': (
        # No first class library types
    ),

    # Standard Library ADTs for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_adt_identifiers,
    ),

    # Standard Library ADTs for Objective Modula-2
    'objm2': (
        m2r10_stdlib_adt_identifiers,
    ),

    # Standard Library ADTs for Aglet Modula-2
    'm2iso+aglet': (
        # No first class library types
    ),

    # Standard Library ADTs for GNU Modula-2
    'm2pim+gm2': (
        # No first class library types
    ),

    # Standard Library ADTs for p1 Modula-2
    'm2iso+p1': (
        # No first class library types
    ),

    # Standard Library ADTs for XDS Modula-2
    'm2iso+xds': (
        # No first class library types
    ),
}

# Standard Library Modules Database
stdlib_modules_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library Modules for PIM Modula-2
    'm2pim': (
        pim_stdlib_module_identifiers,
    ),

    # Standard Library Modules for ISO Modula-2
    'm2iso': (
        iso_stdlib_module_identifiers,
    ),

    # Standard Library Modules for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_blueprint_identifiers,
        m2r10_stdlib_module_identifiers,
        m2r10_stdlib_adt_identifiers,
    ),

    # Standard Library Modules for Objective Modula-2
    'objm2': (
        m2r10_stdlib_blueprint_identifiers,
        m2r10_stdlib_module_identifiers,
    ),

    # Standard Library Modules for Aglet Modula-2
    'm2iso+aglet': (
        iso_stdlib_module_identifiers,
    ),

    # Standard Library Modules for GNU Modula-2
    'm2pim+gm2': (
        pim_stdlib_module_identifiers,
    ),

    # Standard Library Modules for p1 Modula-2
    'm2iso+p1': (
        iso_stdlib_module_identifiers,
    ),

    # Standard Library Modules for XDS Modula-2
    'm2iso+xds': (
        iso_stdlib_module_identifiers,
    ),
}

# Standard Library Types Database
stdlib_types_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library Types for PIM Modula-2
    'm2pim': (
        pim_stdlib_type_identifiers,
    ),

    # Standard Library Types for ISO Modula-2
    'm2iso': (
        iso_stdlib_type_identifiers,
    ),

    # Standard Library Types for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_type_identifiers,
    ),

    # Standard Library Types for Objective Modula-2
    'objm2': (
        m2r10_stdlib_type_identifiers,
    ),

    # Standard Library Types for Aglet Modula-2
    'm2iso+aglet': (
        iso_stdlib_type_identifiers,
    ),

    # Standard Library Types for GNU Modula-2
    'm2pim+gm2': (
        pim_stdlib_type_identifiers,
    ),

    # Standard Library Types for p1 Modula-2
    'm2iso+p1': (
        iso_stdlib_type_identifiers,
    ),

    # Standard Library Types for XDS Modula-2
    'm2iso+xds': (
        iso_stdlib_type_identifiers,
    ),
}
+
# Standard Library Procedures Database
stdlib_procedures_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library Procedures for PIM Modula-2
    'm2pim': (
        pim_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for ISO Modula-2
    'm2iso': (
        iso_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for Objective Modula-2
    'objm2': (
        m2r10_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for Aglet Modula-2
    'm2iso+aglet': (
        iso_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for GNU Modula-2
    'm2pim+gm2': (
        pim_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for p1 Modula-2
    'm2iso+p1': (
        iso_stdlib_proc_identifiers,
    ),

    # Standard Library Procedures for XDS Modula-2
    'm2iso+xds': (
        iso_stdlib_proc_identifiers,
    ),
}

# Standard Library Variables Database
stdlib_variables_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library Variables for PIM Modula-2
    'm2pim': (
        pim_stdlib_var_identifiers,
    ),

    # Standard Library Variables for ISO Modula-2
    'm2iso': (
        iso_stdlib_var_identifiers,
    ),

    # Standard Library Variables for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_var_identifiers,
    ),

    # Standard Library Variables for Objective Modula-2
    'objm2': (
        m2r10_stdlib_var_identifiers,
    ),

    # Standard Library Variables for Aglet Modula-2
    'm2iso+aglet': (
        iso_stdlib_var_identifiers,
    ),

    # Standard Library Variables for GNU Modula-2
    'm2pim+gm2': (
        pim_stdlib_var_identifiers,
    ),

    # Standard Library Variables for p1 Modula-2
    'm2iso+p1': (
        iso_stdlib_var_identifiers,
    ),

    # Standard Library Variables for XDS Modula-2
    'm2iso+xds': (
        iso_stdlib_var_identifiers,
    ),
}

# Standard Library Constants Database
stdlib_constants_db = {
    # Empty entry for unknown dialect
    'unknown': (
        # LEAVE THIS EMPTY
    ),
    # Standard Library Constants for PIM Modula-2
    'm2pim': (
        pim_stdlib_const_identifiers,
    ),

    # Standard Library Constants for ISO Modula-2
    'm2iso': (
        iso_stdlib_const_identifiers,
    ),

    # Standard Library Constants for Modula-2 R10
    'm2r10': (
        m2r10_stdlib_const_identifiers,
    ),

    # Standard Library Constants for Objective Modula-2
    'objm2': (
        m2r10_stdlib_const_identifiers,
    ),

    # Standard Library Constants for Aglet Modula-2
    'm2iso+aglet': (
        iso_stdlib_const_identifiers,
    ),

    # Standard Library Constants for GNU Modula-2
    'm2pim+gm2': (
        pim_stdlib_const_identifiers,
    ),

    # Standard Library Constants for p1 Modula-2
    'm2iso+p1': (
        iso_stdlib_const_identifiers,
    ),

    # Standard Library Constants for XDS Modula-2
    'm2iso+xds': (
        iso_stdlib_const_identifiers,
    ),
}
+
+# M e t h o d s
+
# initialise a lexer instance
def __init__(self, **options):
    """Initialise the lexer, selecting a dialect from the options.

    Recognised options:
      dialect : list of dialect identifiers; the first valid one wins,
                otherwise the permissive 'unknown' mode is used
      style   : list of style names; the Algol publication styles
                switch keyword/builtin output to lowercase
      treat_stdlib_adts_as_builtins : bool (default True)
    """
    # select the first valid dialect option, if any;
    # self.dialects[0] is the internal 'unknown' fallback and is not a
    # user-selectable option, hence the [1:] slice (the original [1:-1]
    # slice wrongly excluded the last entry, 'm2iso+xds', even though it
    # is a fully supported dialect)
    for dialect_option in get_list_opt(options, 'dialect', []):
        if dialect_option in self.dialects[1:]:
            self.set_dialect(dialect_option)
            break
    else:
        # Fallback Mode (DEFAULT): no valid dialect option given
        self.set_dialect('unknown')
    #
    # an embedded dialect tag comment may still override this selection
    # later (see get_tokens_unprocessed)
    self.dialect_set_by_tag = False
    #
    # use lowercase lexeme rendering for the Algol publication styles
    styles = get_list_opt(options, 'style', [])
    self.algol_publication_mode = 'algol' in styles or 'algol_nu' in styles
    #
    # check option flags
    self.treat_stdlib_adts_as_builtins = get_bool_opt(
        options, 'treat_stdlib_adts_as_builtins', True)
    #
    # call superclass initialiser
    RegexLexer.__init__(self, **options)
+
# Set lexer to a specified dialect
def set_dialect(self, dialect_id):
    """Configure the lexer's identifier and lexeme sets for *dialect_id*.

    An unrecognised identifier falls back to the permissive 'unknown'
    dialect. Builtins never shadow reserved words, and standard library
    identifiers never shadow builtins.
    """
    # check dialect name against known dialects
    if dialect_id not in self.dialects:
        dialect = 'unknown'  # default
    else:
        dialect = dialect_id

    def merged(db, exclude=frozenset()):
        # Union of all identifier groups registered for this dialect,
        # minus any names already claimed by a higher-priority set.
        # (The original looped with a variable named `list`, shadowing
        # the builtin; this helper also removes tenfold duplication.)
        result = set()
        for group in db[dialect]:
            result.update(set(group) - exclude)
        return result

    # compose the per-category identifier sets
    lexemes_to_reject_set = merged(self.lexemes_to_reject_db)
    reswords_set = merged(self.reserved_words_db)
    # builtins and pseudo-builtins exclude reserved words
    builtins_set = merged(self.builtins_db, reswords_set)
    pseudo_builtins_set = merged(self.pseudo_builtins_db, reswords_set)
    adts_set = merged(self.stdlib_adts_db, reswords_set)
    # standard library identifiers exclude builtins
    modules_set = merged(self.stdlib_modules_db, builtins_set)
    types_set = merged(self.stdlib_types_db, builtins_set)
    procedures_set = merged(self.stdlib_procedures_db, builtins_set)
    variables_set = merged(self.stdlib_variables_db, builtins_set)
    constants_set = merged(self.stdlib_constants_db, builtins_set)

    # update lexer state
    self.dialect = dialect
    self.lexemes_to_reject = lexemes_to_reject_set
    self.reserved_words = reswords_set
    self.builtins = builtins_set
    self.pseudo_builtins = pseudo_builtins_set
    self.adts = adts_set
    self.modules = modules_set
    self.types = types_set
    self.procedures = procedures_set
    self.variables = variables_set
    self.constants = constants_set
+
# Extracts a dialect name from a dialect tag comment string and checks
# the extracted name against known dialects. If a match is found, the
# matching name is returned, otherwise dialect id 'unknown' is returned
def get_dialect_from_dialect_tag(self, dialect_tag):
    """Return the dialect id embedded in a ``(*!dialect*)`` tag comment,
    or 'unknown' if the tag is malformed or names no known dialect."""
    left_tag_delim = '(*!'
    right_tag_delim = '*)'
    left_len = len(left_tag_delim)
    right_len = len(right_tag_delim)
    #
    # a well-formed tag must be longer than its delimiters and carry
    # both of them; anything else is not a dialect indicator
    well_formed = (
        len(dialect_tag) > left_len + right_len
        and dialect_tag.startswith(left_tag_delim)
        and dialect_tag.endswith(right_tag_delim)
    )
    if not well_formed:
        return 'unknown'  # default
    #
    # extract the indicator between the delimiters and validate it
    # against the known dialects (index 0, 'unknown', is not taggable)
    indicator = dialect_tag[left_len:-right_len]
    if indicator in self.dialects[1:]:
        return indicator
    return 'unknown'  # default
+
# intercept the token stream, modify token attributes and return them
def get_tokens_unprocessed(self, text):
    """Post-process the regex token stream for the active dialect.

    Name tokens are promoted to keyword/builtin/stdlib token types,
    lexemes that are illegal in the active dialect are demoted to
    Error, and operator lexemes are replaced by their mathematical
    equivalents when an Algol publication style is active.  A dialect
    tag comment encountered in the stream switches the dialect on the
    fly (first tag wins).

    NOTE: the if/elif branch order below is significant -- each token
    is reclassified by exactly one of the Name / Number / Comment /
    other branches.
    """
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        #
        # check for dialect tag if dialect has not been set by tag
        # (Comment.Special is only produced by the dialecttags rules)
        if not self.dialect_set_by_tag and token == Comment.Special:
            indicated_dialect = self.get_dialect_from_dialect_tag(value)
            if indicated_dialect != 'unknown':
                # token is a dialect indicator
                # reset reserved words and builtins
                self.set_dialect(indicated_dialect)
                self.dialect_set_by_tag = True
        #
        # check for reserved words, predefined and stdlib identifiers
        if token is Name:
            if value in self.reserved_words:
                token = Keyword.Reserved
                if self.algol_publication_mode:
                    value = value.lower()
            #
            elif value in self.builtins:
                token = Name.Builtin
                if self.algol_publication_mode:
                    value = value.lower()
            #
            elif value in self.pseudo_builtins:
                token = Name.Builtin.Pseudo
                if self.algol_publication_mode:
                    value = value.lower()
            #
            elif value in self.adts:
                # library ADTs may be rendered as builtins or namespaces,
                # depending on the treat_stdlib_adts_as_builtins option
                if not self.treat_stdlib_adts_as_builtins:
                    token = Name.Namespace
                else:
                    token = Name.Builtin.Pseudo
                    if self.algol_publication_mode:
                        value = value.lower()
            #
            elif value in self.modules:
                token = Name.Namespace
            #
            elif value in self.types:
                token = Name.Class
            #
            elif value in self.procedures:
                token = Name.Function
            #
            elif value in self.variables:
                token = Name.Variable
            #
            elif value in self.constants:
                token = Name.Constant
        #
        elif token in Number:
            #
            # mark prefix number literals as error for PIM and ISO dialects
            # (digit separators and 0b/0x/0u prefixes are M2 R10 syntax)
            if self.dialect not in ('unknown', 'm2r10', 'objm2'):
                if "'" in value or value[0:2] in ('0b', '0x', '0u'):
                    token = Error
            #
            elif self.dialect in ('m2r10', 'objm2'):
                # mark base-8 number literals as errors for M2 R10 and ObjM2
                if token is Number.Oct:
                    token = Error
                # mark suffix base-16 literals as errors for M2 R10 and ObjM2
                elif token is Number.Hex and 'H' in value:
                    token = Error
                # mark real numbers with E as errors for M2 R10 and ObjM2
                # (R10 exponents use lowercase 'e'; see plain_number_literals)
                elif token is Number.Float and 'E' in value:
                    token = Error
        #
        elif token in Comment:
            #
            # mark single line comment as error for PIM and ISO dialects
            if token is Comment.Single:
                if self.dialect not in ('unknown', 'm2r10', 'objm2'):
                    token = Error
            #
            if token is Comment.Preproc:
                # mark ISO pragma as error for PIM dialects
                if value.startswith('<*') and \
                   self.dialect.startswith('m2pim'):
                    token = Error
                # mark PIM pragma as comment for other dialects
                elif value.startswith('(*$') and \
                        self.dialect != 'unknown' and \
                        not self.dialect.startswith('m2pim'):
                    token = Comment.Multiline
        #
        else:  # token is neither Name nor Comment
            #
            # mark lexemes matching the dialect's error token set as errors
            if value in self.lexemes_to_reject:
                token = Error
            #
            # substitute lexemes when in Algol mode
            if self.algol_publication_mode:
                if value == '#':
                    value = '≠'
                elif value == '<=':
                    value = '≤'
                elif value == '>=':
                    value = '≥'
                elif value == '==':
                    value = '≡'
                elif value == '*.':
                    value = '•'

        # return result
        yield index, token, value
+
def analyse_text(text):
    """It's Pascal-like, but does not use FUNCTION -- uses PROCEDURE
    instead."""
    # without block comments and assignment it is not Pascal-family
    # at all; bail out early with no score
    if '(*' not in text or '*)' not in text or ':=' not in text:
        return

    score = 0
    # PROCEDURE is used by Modula-2
    if re.search(r'\bPROCEDURE\b', text):
        score += 0.6
    # FUNCTION exists in Pascal but not in Modula-2, so its presence
    # vetoes any score accumulated so far
    if re.search(r'\bFUNCTION\b', text):
        score = 0.0

    return score
diff --git a/pygments/lexers/monte.py b/pygments/lexers/monte.py
new file mode 100644
index 0000000..e22fb93
--- /dev/null
+++ b/pygments/lexers/monte.py
@@ -0,0 +1,204 @@
+"""
+ pygments.lexers.monte
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Monte programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+from pygments.lexer import RegexLexer, include, words
+
+__all__ = ['MonteLexer']
+
+
+# Word lists used to build the MonteLexer token rules below.
+# `var` handled separately
+# `interface` handled separately
+_declarations = ['bind', 'def', 'fn', 'object']
+_methods = ['method', 'to']
+_keywords = [
+    'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports',
+    'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import',
+    'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when',
+    'while',
+]
+_operators = [
+    # Unary
+    '~', '!',
+    # Binary
+    '+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>',
+    # Binary augmented
+    '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=',
+    # Comparison
+    '==', '!=', '<', '<=', '>', '>=', '<=>',
+    # Patterns and assignment
+    ':=', '?', '=~', '!~', '=>',
+    # Calls and sends
+    '.', '<-', '->',
+]
+# Matches exactly one escape sequence: \xHH, \uHHHH, \UHHHHHHHH, or a
+# single-character escape such as \n or \".
+_escape_pattern = (
+    r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
+    r'\\["\'\\bftnr])')
+# _char = _escape_chars + [('.', String.Char)]
+_identifier = r'[_a-zA-Z]\w*'
+
+_constants = [
+    # Void constants
+    'null',
+    # Bool constants
+    'false', 'true',
+    # Double constants
+    'Infinity', 'NaN',
+    # Special objects
+    'M', 'Ref', 'throw', 'traceln',
+]
+
+_guards = [
+    'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double',
+    'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless',
+    'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void',
+]
+
+# NOTE(review): 'import' appears both here and in _keywords; the keyword
+# rule is tried first in 'root', so the Name.Builtin rule never sees it.
+_safeScope = [
+    '_accumulateList', '_accumulateMap', '_auditedBy', '_bind',
+    '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop',
+    '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList',
+    '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc',
+    '_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot',
+    '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher',
+    '_slotToBinding', '_splitList', '_suchThat', '_switchFailed',
+    '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser',
+    'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser',
+]
+
+
+class MonteLexer(RegexLexer):
+    """
+    Lexer for the Monte programming language.
+
+    .. versionadded:: 2.2
+    """
+    name = 'Monte'
+    url = 'https://monte.readthedocs.io/'
+    aliases = ['monte']
+    filenames = ['*.mt']
+
+    # State machine: 'root' is the top level; 'char'/'string'/'ql' handle
+    # the three literal forms, and 'var'/'interface'/'method' capture the
+    # identifier immediately following the respective declaration keyword.
+    tokens = {
+        'root': [
+            # Comments
+            (r'#[^\n]*\n', Comment),
+
+            # Docstrings
+            # Apologies for the non-greedy matcher here.
+            # NOTE(review): RegexLexer's default flags do not include
+            # re.DOTALL, so this cannot span newlines — confirm docstrings
+            # are expected to be single-line here.
+            (r'/\*\*.*?\*/', String.Doc),
+
+            # `var` declarations
+            (r'\bvar\b', Keyword.Declaration, 'var'),
+
+            # `interface` declarations
+            (r'\binterface\b', Keyword.Declaration, 'interface'),
+
+            # method declarations
+            (words(_methods, prefix='\\b', suffix='\\b'),
+             Keyword, 'method'),
+
+            # All other declarations
+            (words(_declarations, prefix='\\b', suffix='\\b'),
+             Keyword.Declaration),
+
+            # Keywords
+            (words(_keywords, prefix='\\b', suffix='\\b'), Keyword),
+
+            # Literals
+            ('[+-]?0x[_0-9a-fA-F]+', Number.Hex),
+            (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float),
+            ('[+-]?[_0-9]+', Number.Integer),
+            ("'", String.Double, 'char'),
+            ('"', String.Double, 'string'),
+
+            # Quasiliterals
+            ('`', String.Backtick, 'ql'),
+
+            # Operators
+            (words(_operators), Operator),
+
+            # Verb operators
+            (_identifier + '=', Operator.Word),
+
+            # Safe scope constants
+            (words(_constants, prefix='\\b', suffix='\\b'),
+             Keyword.Pseudo),
+
+            # Safe scope guards
+            (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type),
+
+            # All other safe scope names
+            (words(_safeScope, prefix='\\b', suffix='\\b'),
+             Name.Builtin),
+
+            # Identifiers
+            (_identifier, Name),
+
+            # Punctuation
+            (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation),
+
+            # Whitespace
+            (' +', Whitespace),
+
+            # Definite lexer errors
+            ('=', Error),
+        ],
+        'char': [
+            # It is definitely an error to have a char of width == 0.
+            # NOTE(review): this pushes 'root' rather than popping, so the
+            # state stack grows by one entry per empty char literal — the
+            # visible output is the same, but confirm '#pop' was not meant.
+            ("'", Error, 'root'),
+            (_escape_pattern, String.Escape, 'charEnd'),
+            ('.', String.Char, 'charEnd'),
+        ],
+        'charEnd': [
+            # Pops both 'charEnd' and the enclosing 'char' state.
+            ("'", String.Char, '#pop:2'),
+            # It is definitely an error to have a char of width > 1.
+            ('.', Error),
+        ],
+        # The state of things coming into an interface.
+        'interface': [
+            (' +', Whitespace),
+            (_identifier, Name.Class, '#pop'),
+            include('root'),
+        ],
+        # The state of things coming into a method.
+        'method': [
+            (' +', Whitespace),
+            (_identifier, Name.Function, '#pop'),
+            include('root'),
+        ],
+        'string': [
+            # NOTE(review): the closing quote pushes 'root' instead of
+            # popping ('#pop'), leaving a stale 'string' frame on the stack
+            # for every string literal — confirm against upstream intent.
+            ('"', String.Double, 'root'),
+            (_escape_pattern, String.Escape),
+            (r'\n', String.Double),
+            ('.', String.Double),
+        ],
+        'ql': [
+            # NOTE(review): same pattern as 'string' — the closing backtick
+            # pushes 'root' rather than popping.
+            ('`', String.Backtick, 'root'),
+            # $-escapes; `$$` and `@@` are literal dollar/at signs.
+            (r'\$' + _escape_pattern, String.Escape),
+            (r'\$\$', String.Escape),
+            (r'@@', String.Escape),
+            # ${expr} and @{patt} interpolation holes.
+            (r'\$\{', String.Interpol, 'qlNest'),
+            (r'@\{', String.Interpol, 'qlNest'),
+            (r'\$' + _identifier, Name),
+            ('@' + _identifier, Name),
+            ('.', String.Backtick),
+        ],
+        'qlNest': [
+            (r'\}', String.Interpol, '#pop'),
+            include('root'),
+        ],
+        # The state of things immediately following `var`.
+        'var': [
+            (' +', Whitespace),
+            (_identifier, Name.Variable, '#pop'),
+            include('root'),
+        ],
+    }
diff --git a/pygments/lexers/mosel.py b/pygments/lexers/mosel.py
new file mode 100644
index 0000000..1214ba4
--- /dev/null
+++ b/pygments/lexers/mosel.py
@@ -0,0 +1,447 @@
+"""
+ pygments.lexers.mosel
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the mosel language.
+ http://www.fico.com/en/products/fico-xpress-optimization
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['MoselLexer']
+
+# Names highlighted as Name.Function by MoselLexer, gathered from the core
+# language plus the mmxprs/mmsystem/mmjobs modules (extraction commands are
+# noted inline).  Duplicates across sections (e.g. 'getsize', 'getsol') are
+# harmless: words() compiles the whole tuple into a single alternation.
+FUNCTIONS = (
+    # core functions
+    '_',
+    'abs',
+    'arctan',
+    'asproc',
+    'assert',
+    'bitflip',
+    'bitneg',
+    'bitset',
+    'bitshift',
+    'bittest',
+    'bitval',
+    'ceil',
+    'cos',
+    'create',
+    'currentdate',
+    'currenttime',
+    'cutelt',
+    'cutfirst',
+    'cuthead',
+    'cutlast',
+    'cuttail',
+    'datablock',
+    'delcell',
+    'exists',
+    'exit',
+    'exp',
+    'exportprob',
+    'fclose',
+    'fflush',
+    'finalize',
+    'findfirst',
+    'findlast',
+    'floor',
+    'fopen',
+    'fselect',
+    'fskipline',
+    'fwrite',
+    'fwrite_',
+    'fwriteln',
+    'fwriteln_',
+    'getact',
+    'getcoeff',
+    'getcoeffs',
+    'getdual',
+    'getelt',
+    'getfid',
+    'getfirst',
+    'getfname',
+    'gethead',
+    'getlast',
+    'getobjval',
+    'getparam',
+    'getrcost',
+    'getreadcnt',
+    'getreverse',
+    'getsize',
+    'getslack',
+    'getsol',
+    'gettail',
+    'gettype',
+    'getvars',
+    'isdynamic',
+    'iseof',
+    'isfinite',
+    'ishidden',
+    'isinf',
+    'isnan',
+    'isodd',
+    'ln',
+    'localsetparam',
+    'log',
+    'makesos1',
+    'makesos2',
+    'maxlist',
+    'memoryuse',
+    'minlist',
+    'newmuid',
+    'publish',
+    'random',
+    'read',
+    'readln',
+    'reset',
+    'restoreparam',
+    'reverse',
+    'round',
+    'setcoeff',
+    'sethidden',
+    'setioerr',
+    'setmatherr',
+    'setname',
+    'setparam',
+    'setrandseed',
+    'setrange',
+    'settype',
+    'sin',
+    'splithead',
+    'splittail',
+    'sqrt',
+    'strfmt',
+    'substr',
+    'timestamp',
+    'unpublish',
+    'versionnum',
+    'versionstr',
+    'write',
+    'write_',
+    'writeln',
+    'writeln_',
+
+    # mosel exam mmxprs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'addcut',
+    'addcuts',
+    'addmipsol',
+    'basisstability',
+    'calcsolinfo',
+    'clearmipdir',
+    'clearmodcut',
+    'command',
+    'copysoltoinit',
+    'crossoverlpsol',
+    'defdelayedrows',
+    'defsecurevecs',
+    'delcuts',
+    'dropcuts',
+    'estimatemarginals',
+    'fixglobal',
+    'flushmsgq',
+    'getbstat',
+    'getcnlist',
+    'getcplist',
+    'getdualray',
+    'getiis',
+    'getiissense',
+    'getiistype',
+    'getinfcause',
+    'getinfeas',
+    'getlb',
+    'getlct',
+    'getleft',
+    'getloadedlinctrs',
+    'getloadedmpvars',
+    'getname',
+    'getprimalray',
+    'getprobstat',
+    'getrange',
+    'getright',
+    'getsensrng',
+    'getsize',
+    'getsol',
+    'gettype',
+    'getub',
+    'getvars',
+    'gety',
+    'hasfeature',
+    'implies',
+    'indicator',
+    'initglobal',
+    'ishidden',
+    'isiisvalid',
+    'isintegral',
+    'loadbasis',
+    'loadcuts',
+    'loadlpsol',
+    'loadmipsol',
+    'loadprob',
+    'maximise',
+    'maximize',
+    'minimise',
+    'minimize',
+    'postsolve',
+    'readbasis',
+    'readdirs',
+    'readsol',
+    'refinemipsol',
+    'rejectintsol',
+    'repairinfeas',
+    'repairinfeas_deprec',
+    'resetbasis',
+    'resetiis',
+    'resetsol',
+    'savebasis',
+    'savemipsol',
+    'savesol',
+    'savestate',
+    'selectsol',
+    'setarchconsistency',
+    'setbstat',
+    'setcallback',
+    'setcbcutoff',
+    'setgndata',
+    'sethidden',
+    'setlb',
+    'setmipdir',
+    'setmodcut',
+    'setsol',
+    'setub',
+    'setucbdata',
+    'stopoptimise',
+    'stopoptimize',
+    'storecut',
+    'storecuts',
+    'unloadprob',
+    'uselastbarsol',
+    'writebasis',
+    'writedirs',
+    'writeprob',
+    'writesol',
+    'xor',
+    'xprs_addctr',
+    'xprs_addindic',
+
+    # mosel exam mmsystem | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'addmonths',
+    'copytext',
+    'cuttext',
+    'deltext',
+    'endswith',
+    'erase',
+    'expandpath',
+    'fcopy',
+    'fdelete',
+    'findfiles',
+    'findtext',
+    'fmove',
+    'formattext',
+    'getasnumber',
+    'getchar',
+    'getcwd',
+    'getdate',
+    'getday',
+    'getdaynum',
+    'getdays',
+    'getdirsep',
+    'getdsoparam',
+    'getendparse',
+    'getenv',
+    'getfsize',
+    'getfstat',
+    'getftime',
+    'gethour',
+    'getminute',
+    'getmonth',
+    'getmsec',
+    'getoserrmsg',
+    'getoserror',
+    'getpathsep',
+    'getqtype',
+    'getsecond',
+    'getsepchar',
+    'getsize',
+    'getstart',
+    'getsucc',
+    'getsysinfo',
+    'getsysstat',
+    'gettime',
+    'gettmpdir',
+    'gettrim',
+    'getweekday',
+    'getyear',
+    'inserttext',
+    'isvalid',
+    'jointext',
+    'makedir',
+    'makepath',
+    'newtar',
+    'newzip',
+    'nextfield',
+    'openpipe',
+    'parseextn',
+    'parseint',
+    'parsereal',
+    'parsetext',
+    'pastetext',
+    'pathmatch',
+    'pathsplit',
+    'qsort',
+    'quote',
+    'readtextline',
+    'regmatch',
+    'regreplace',
+    'removedir',
+    'removefiles',
+    'setchar',
+    'setdate',
+    'setday',
+    'setdsoparam',
+    'setendparse',
+    'setenv',
+    'sethour',
+    'setminute',
+    'setmonth',
+    'setmsec',
+    'setoserror',
+    'setqtype',
+    'setsecond',
+    'setsepchar',
+    'setstart',
+    'setsucc',
+    'settime',
+    'settrim',
+    'setyear',
+    'sleep',
+    'splittext',
+    'startswith',
+    'system',
+    'tarlist',
+    'textfmt',
+    'tolower',
+    'toupper',
+    'trim',
+    'untar',
+    'unzip',
+    'ziplist',
+
+    # mosel exam mmjobs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'canceltimer',
+    'clearaliases',
+    'compile',
+    'connect',
+    'detach',
+    'disconnect',
+    'dropnextevent',
+    'findxsrvs',
+    'getaliases',
+    'getannidents',
+    'getannotations',
+    'getbanner',
+    'getclass',
+    'getdsoprop',
+    'getdsopropnum',
+    'getexitcode',
+    'getfromgid',
+    'getfromid',
+    'getfromuid',
+    'getgid',
+    'gethostalias',
+    'getid',
+    'getmodprop',
+    'getmodpropnum',
+    'getnextevent',
+    'getnode',
+    'getrmtid',
+    'getstatus',
+    'getsysinfo',
+    'gettimer',
+    'getuid',
+    'getvalue',
+    'isqueueempty',
+    'load',
+    'nullevent',
+    'peeknextevent',
+    'resetmodpar',
+    'run',
+    'send',
+    'setcontrol',
+    'setdefstream',
+    'setgid',
+    'sethostalias',
+    'setmodpar',
+    'settimer',
+    'setuid',
+    'setworkdir',
+    'stop',
+    'unload',
+    'wait',
+    'waitexpired',
+    'waitfor',
+    'waitforend',
+)
+
+
+class MoselLexer(RegexLexer):
+    """
+    For the Mosel optimization language.
+
+    .. versionadded:: 2.6
+    """
+    name = 'Mosel'
+    aliases = ['mosel']
+    filenames = ['*.mos']
+
+    tokens = {
+        'root': [
+            (r'\n', Text),
+            (r'\s+', Text.Whitespace),
+            # `!` starts a line comment; `(! ... !)` is a block comment.
+            (r'!.*?\n', Comment.Single),
+            (r'\(!(.|\n)*?!\)', Comment.Multiline),
+            (words((
+                'and', 'as', 'break', 'case', 'count', 'declarations', 'do',
+                'dynamic', 'elif', 'else', 'end-', 'end', 'evaluation', 'false',
+                'forall', 'forward', 'from', 'function', 'hashmap', 'if',
+                'imports', 'include', 'initialisations', 'initializations', 'inter',
+                'max', 'min', 'model', 'namespace', 'next', 'not', 'nsgroup',
+                'nssearch', 'of', 'options', 'or', 'package', 'parameters',
+                'procedure', 'public', 'prod', 'record', 'repeat', 'requirements',
+                'return', 'sum', 'then', 'to', 'true', 'union', 'until', 'uses',
+                'version', 'while', 'with'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Builtin),
+            (words((
+                'range', 'array', 'set', 'list', 'mpvar', 'mpproblem', 'linctr',
+                'nlctr', 'integer', 'string', 'real', 'boolean', 'text', 'time',
+                'date', 'datetime', 'returned', 'Model', 'Mosel', 'counter',
+                'xmldoc', 'is_sos1', 'is_sos2', 'is_integer', 'is_binary',
+                'is_continuous', 'is_free', 'is_semcont', 'is_semint',
+                'is_partint'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Type),
+            # NOTE(review): the word operators `in`, `mod`, `div` carry no \b
+            # guards, so they can match inside identifiers (e.g. "index" ->
+            # Operator 'in' + Text 'dex') — confirm against upstream intent.
+            (r'(\+|\-|\*|/|=|<=|>=|\||\^|<|>|<>|\.\.|\.|:=|::|:|in|mod|div)',
+             Operator),
+            (r'[()\[\]{},;]+', Punctuation),
+            (words(FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Function),
+            (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
+            (r'\d+([eE][+-]?\d+)?', Number.Integer),
+            (r'[+-]?Infinity', Number.Integer),
+            # NOTE(review): this hex rule is shadowed by the `\d+` integer
+            # rule above ("0x2A" lexes as integer '0' then text 'x2A') —
+            # flagging rather than reordering to keep the diff intact.
+            (r'0[xX][0-9a-fA-F]+', Number),
+            (r'"', String.Double, 'double_quote'),
+            (r'\'', String.Single, 'single_quote'),
+            # Fallback: plain identifiers and a lone '.' become Text.
+            (r'(\w+|(\.(?!\.)))', Text),
+        ],
+        'single_quote': [
+            (r'\'', String.Single, '#pop'),
+            # No escapes inside single-quoted strings.
+            (r'[^\']+', String.Single),
+        ],
+        'double_quote': [
+            (r'(\\"|\\[0-7]{1,3}\D|\\[abfnrtv]|\\\\)', String.Escape),
+            (r'\"', String.Double, '#pop'),
+            (r'[^"\\]+', String.Double),
+        ],
+    }
diff --git a/pygments/lexers/ncl.py b/pygments/lexers/ncl.py
new file mode 100644
index 0000000..30cc510
--- /dev/null
+++ b/pygments/lexers/ncl.py
@@ -0,0 +1,893 @@
+"""
+ pygments.lexers.ncl
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for NCAR Command Language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['NCLLexer']
+
+
+class NCLLexer(RegexLexer):
+ """
+ Lexer for NCL code.
+
+ .. versionadded:: 2.2
+ """
+ name = 'NCL'
+ aliases = ['ncl']
+ filenames = ['*.ncl']
+ mimetypes = ['text/ncl']
+ flags = re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r';.*\n', Comment),
+ include('strings'),
+ include('core'),
+ (r'[a-zA-Z_]\w*', Name),
+ include('nums'),
+ (r'[\s]+', Text),
+ ],
+ 'core': [
+ # Statements
+ (words((
+ 'begin', 'break', 'continue', 'create', 'defaultapp', 'do',
+ 'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function',
+ 'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local',
+ 'new', '_Missing', 'Missing', 'noparent', 'procedure',
+ 'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop',
+ 'then', 'while'), prefix=r'\b', suffix=r'\s*\b'),
+ Keyword),
+
+ # Data Types
+ (words((
+ 'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte',
+ 'character', 'double', 'float', 'integer', 'int64', 'logical',
+ 'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'),
+ prefix=r'\b', suffix=r'\s*\b'),
+ Keyword.Type),
+
+ # Operators
+ (r'[\%^*+\-/<>]', Operator),
+
+ # punctuation:
+ (r'[\[\]():@$!&|.,\\{}]', Punctuation),
+ (r'[=:]', Punctuation),
+
+ # Intrinsics
+ (words((
+ 'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any',
+ 'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere',
+ 'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues',
+ 'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter',
+ 'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar',
+ 'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p',
+ 'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x',
+ 'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff',
+ 'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder',
+ 'charactertodouble', 'charactertofloat', 'charactertointeger',
+ 'charactertolong', 'charactertoshort', 'charactertostring',
+ 'chartodouble', 'chartofloat', 'chartoint', 'chartointeger',
+ 'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear',
+ 'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh',
+ 'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec',
+ 'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s',
+ 'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld',
+ 'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x',
+ 'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls',
+ 'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd',
+ 'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri',
+ 'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week',
+ 'day_of_year', 'days_in_month', 'default_fillvalue', 'delete',
+ 'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh',
+ 'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n',
+ 'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n',
+ 'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median',
+ 'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n',
+ 'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product',
+ 'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean',
+ 'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n',
+ 'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n',
+ 'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt',
+ 'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes',
+ 'doubletobyte', 'doubletochar', 'doubletocharacter',
+ 'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong',
+ 'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw',
+ 'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s',
+ 'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s',
+ 'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg',
+ 'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic',
+ 'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height',
+ 'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor',
+ 'eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts',
+ 'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf',
+ 'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n',
+ 'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts',
+ 'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n',
+ 'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs',
+ 'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread',
+ 'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df',
+ 'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef',
+ 'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef',
+ 'filevarcompressleveldef', 'filevardef', 'filevardimsizes',
+ 'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal',
+ 'floattobyte', 'floattochar', 'floattocharacter', 'floattoint',
+ 'floattointeger', 'floattolong', 'floattoshort', 'floor',
+ 'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan',
+ 'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps',
+ 'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp',
+ 'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh',
+ 'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat',
+ 'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle',
+ 'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea',
+ 'gc_tarea', 'generate_2d_array', 'get_color_index',
+ 'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version',
+ 'get_script_name', 'get_script_prefix_name', 'get_sphere_radius',
+ 'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes',
+ 'getfilegrpnames', 'getfilepath', 'getfilevaratts',
+ 'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes',
+ 'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims',
+ 'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb',
+ 'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf',
+ 'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p',
+ 'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter',
+ 'integertoshort', 'inttobyte', 'inttochar', 'inttoshort',
+ 'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar',
+ 'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble',
+ 'isenumeric', 'isfile', 'isfilepresent', 'isfilevar',
+ 'isfilevaratt', 'isfilevarcoord', 'isfilevardim', 'isfloat',
+ 'isfunc', 'isgraphic', 'isint', 'isint64', 'isinteger',
+ 'isleapyear', 'islogical', 'islong', 'ismissing', 'isnan_ieee',
+ 'isnumeric', 'ispan', 'isproc', 'isshort', 'issnumeric', 'isstring',
+ 'isubyte', 'isuint', 'isuint64', 'isulong', 'isunlimited',
+ 'isunsigned', 'isushort', 'isvar', 'jul2greg', 'kmeans_as136',
+ 'kolsm2_n', 'kron_product', 'lapsf', 'lapsg', 'lapvf', 'lapvg',
+ 'latlon2utm', 'lclvl', 'lderuvf', 'lderuvg', 'linint1', 'linint1_n',
+ 'linint2', 'linint2_points', 'linmsg', 'linmsg_n', 'linrood_latwgt',
+ 'linrood_wgt', 'list_files', 'list_filevars', 'list_hlus',
+ 'list_procfuncs', 'list_vars', 'ListAppend', 'ListCount',
+ 'ListGetType', 'ListIndex', 'ListIndexFromName', 'ListPop',
+ 'ListPush', 'ListSetType', 'loadscript', 'local_max', 'local_min',
+ 'log', 'log10', 'longtobyte', 'longtochar', 'longtocharacter',
+ 'longtoint', 'longtointeger', 'longtoshort', 'lspoly', 'lspoly_n',
+ 'mask', 'max', 'maxind', 'min', 'minind', 'mixed_layer_depth',
+ 'mixhum_ptd', 'mixhum_ptrh', 'mjo_cross_coh2pha',
+ 'mjo_cross_segment', 'moc_globe_atl', 'monthday', 'natgrid',
+ 'natgridd', 'natgrids', 'ncargpath', 'ncargversion', 'ndctodata',
+ 'ndtooned', 'new', 'NewList', 'ngezlogo', 'nggcog', 'nggetp',
+ 'nglogo', 'ngsetp', 'NhlAddAnnotation', 'NhlAddData',
+ 'NhlAddOverlay', 'NhlAddPrimitive', 'NhlAppGetDefaultParentId',
+ 'NhlChangeWorkstation', 'NhlClassName', 'NhlClearWorkstation',
+ 'NhlDataPolygon', 'NhlDataPolyline', 'NhlDataPolymarker',
+ 'NhlDataToNDC', 'NhlDestroy', 'NhlDraw', 'NhlFrame', 'NhlFreeColor',
+ 'NhlGetBB', 'NhlGetClassResources', 'NhlGetErrorObjectId',
+ 'NhlGetNamedColorIndex', 'NhlGetParentId',
+ 'NhlGetParentWorkstation', 'NhlGetWorkspaceObjectId',
+ 'NhlIsAllocatedColor', 'NhlIsApp', 'NhlIsDataComm', 'NhlIsDataItem',
+ 'NhlIsDataSpec', 'NhlIsTransform', 'NhlIsView', 'NhlIsWorkstation',
+ 'NhlName', 'NhlNDCPolygon', 'NhlNDCPolyline', 'NhlNDCPolymarker',
+ 'NhlNDCToData', 'NhlNewColor', 'NhlNewDashPattern', 'NhlNewMarker',
+ 'NhlPalGetDefined', 'NhlRemoveAnnotation', 'NhlRemoveData',
+ 'NhlRemoveOverlay', 'NhlRemovePrimitive', 'NhlSetColor',
+ 'NhlSetDashPattern', 'NhlSetMarker', 'NhlUpdateData',
+ 'NhlUpdateWorkstation', 'nice_mnmxintvl', 'nngetaspectd',
+ 'nngetaspects', 'nngetp', 'nngetsloped', 'nngetslopes', 'nngetwts',
+ 'nngetwtsd', 'nnpnt', 'nnpntd', 'nnpntend', 'nnpntendd',
+ 'nnpntinit', 'nnpntinitd', 'nnpntinits', 'nnpnts', 'nnsetp', 'num',
+ 'obj_anal_ic', 'omega_ccm', 'onedtond', 'overlay', 'paleo_outline',
+ 'pdfxy_bin', 'poisson_grid_fill', 'pop_remap', 'potmp_insitu_ocn',
+ 'prcwater_dp', 'pres2hybrid', 'pres_hybrid_ccm', 'pres_sigma',
+ 'print', 'print_table', 'printFileVarSummary', 'printVarSummary',
+ 'product', 'pslec', 'pslhor', 'pslhyp', 'qsort', 'rand',
+ 'random_chi', 'random_gamma', 'random_normal', 'random_setallseed',
+ 'random_uniform', 'rcm2points', 'rcm2rgrid', 'rdsstoi',
+ 'read_colormap_file', 'reg_multlin', 'regcoef', 'regCoef_n',
+ 'regline', 'relhum', 'replace_ieeenan', 'reshape', 'reshape_ind',
+ 'rgba_to_color_index', 'rgbhls', 'rgbhsv', 'rgbyiq', 'rgrid2rcm',
+ 'rhomb_trunc', 'rip_cape_2d', 'rip_cape_3d', 'round', 'rtest',
+ 'runave', 'runave_n', 'set_default_fillvalue', 'set_sphere_radius',
+ 'setfileoption', 'sfvp2uvf', 'sfvp2uvg', 'shaec', 'shagc',
+ 'shgetnp', 'shgetp', 'shgrid', 'shorttobyte', 'shorttochar',
+ 'shorttocharacter', 'show_ascii', 'shsec', 'shsetp', 'shsgc',
+ 'shsgc_R42', 'sigma2hybrid', 'simpeq', 'simpne', 'sin',
+ 'sindex_yrmo', 'sinh', 'sizeof', 'sleep', 'smth9', 'snindex_yrmo',
+ 'solve_linsys', 'span_color_indexes', 'span_color_rgba',
+ 'sparse_matrix_mult', 'spcorr', 'spcorr_n', 'specx_anal',
+ 'specxy_anal', 'spei', 'sprintf', 'sprinti', 'sqrt', 'sqsort',
+ 'srand', 'stat2', 'stat4', 'stat_medrng', 'stat_trim',
+ 'status_exit', 'stdatmus_p2tdz', 'stdatmus_z2tdp', 'stddev',
+ 'str_capital', 'str_concat', 'str_fields_count', 'str_get_cols',
+ 'str_get_dq', 'str_get_field', 'str_get_nl', 'str_get_sq',
+ 'str_get_tab', 'str_index_of_substr', 'str_insert', 'str_is_blank',
+ 'str_join', 'str_left_strip', 'str_lower', 'str_match',
+ 'str_match_ic', 'str_match_ic_regex', 'str_match_ind',
+ 'str_match_ind_ic', 'str_match_ind_ic_regex', 'str_match_ind_regex',
+ 'str_match_regex', 'str_right_strip', 'str_split',
+ 'str_split_by_length', 'str_split_csv', 'str_squeeze', 'str_strip',
+ 'str_sub_str', 'str_switch', 'str_upper', 'stringtochar',
+ 'stringtocharacter', 'stringtodouble', 'stringtofloat',
+ 'stringtoint', 'stringtointeger', 'stringtolong', 'stringtoshort',
+ 'strlen', 'student_t', 'sum', 'svd_lapack', 'svdcov', 'svdcov_sv',
+ 'svdstd', 'svdstd_sv', 'system', 'systemfunc', 'tan', 'tanh',
+ 'taper', 'taper_n', 'tdclrs', 'tdctri', 'tdcudp', 'tdcurv',
+ 'tddtri', 'tdez2d', 'tdez3d', 'tdgetp', 'tdgrds', 'tdgrid',
+ 'tdgtrs', 'tdinit', 'tditri', 'tdlbla', 'tdlblp', 'tdlbls',
+ 'tdline', 'tdlndp', 'tdlnpa', 'tdlpdp', 'tdmtri', 'tdotri',
+ 'tdpara', 'tdplch', 'tdprpa', 'tdprpi', 'tdprpt', 'tdsetp',
+ 'tdsort', 'tdstri', 'tdstrs', 'tdttri', 'thornthwaite', 'tobyte',
+ 'tochar', 'todouble', 'tofloat', 'toint', 'toint64', 'tointeger',
+ 'tolong', 'toshort', 'tosigned', 'tostring', 'tostring_with_format',
+ 'totype', 'toubyte', 'touint', 'touint64', 'toulong', 'tounsigned',
+ 'toushort', 'trend_manken', 'tri_trunc', 'triple2grid',
+ 'triple2grid2d', 'trop_wmo', 'ttest', 'typeof', 'undef',
+ 'unique_string', 'update', 'ushorttoint', 'ut_calendar',
+ 'ut_inv_calendar', 'utm2latlon', 'uv2dv_cfd', 'uv2dvf', 'uv2dvg',
+ 'uv2sfvpf', 'uv2sfvpg', 'uv2vr_cfd', 'uv2vrdvf', 'uv2vrdvg',
+ 'uv2vrf', 'uv2vrg', 'v5d_close', 'v5d_create', 'v5d_setLowLev',
+ 'v5d_setUnits', 'v5d_write', 'v5d_write_var', 'variance', 'vhaec',
+ 'vhagc', 'vhsec', 'vhsgc', 'vibeta', 'vinth2p', 'vinth2p_ecmwf',
+ 'vinth2p_ecmwf_nodes', 'vinth2p_nodes', 'vintp2p_ecmwf', 'vr2uvf',
+ 'vr2uvg', 'vrdv2uvf', 'vrdv2uvg', 'wavelet', 'wavelet_default',
+ 'weibull', 'wgt_area_smooth', 'wgt_areaave', 'wgt_areaave2',
+ 'wgt_arearmse', 'wgt_arearmse2', 'wgt_areasum2', 'wgt_runave',
+ 'wgt_runave_n', 'wgt_vert_avg_beta', 'wgt_volave', 'wgt_volave_ccm',
+ 'wgt_volrmse', 'wgt_volrmse_ccm', 'where', 'wk_smooth121', 'wmbarb',
+ 'wmbarbmap', 'wmdrft', 'wmgetp', 'wmlabs', 'wmsetp', 'wmstnm',
+ 'wmvect', 'wmvectmap', 'wmvlbl', 'wrf_avo', 'wrf_cape_2d',
+ 'wrf_cape_3d', 'wrf_dbz', 'wrf_eth', 'wrf_helicity', 'wrf_ij_to_ll',
+ 'wrf_interp_1d', 'wrf_interp_2d_xy', 'wrf_interp_3d_z',
+ 'wrf_latlon_to_ij', 'wrf_ll_to_ij', 'wrf_omega', 'wrf_pvo',
+ 'wrf_rh', 'wrf_slp', 'wrf_smooth_2d', 'wrf_td', 'wrf_tk',
+ 'wrf_updraft_helicity', 'wrf_uvmet', 'wrf_virtual_temp',
+ 'wrf_wetbulb', 'wrf_wps_close_int', 'wrf_wps_open_int',
+ 'wrf_wps_rddata_int', 'wrf_wps_rdhead_int', 'wrf_wps_read_int',
+ 'wrf_wps_write_int', 'write_matrix', 'write_table', 'yiqrgb',
+ 'z2geouv', 'zonal_mpsi', 'addfiles_GetVar', 'advect_variable',
+ 'area_conserve_remap_Wrap', 'area_hi2lores_Wrap',
+ 'array_append_record', 'assignFillValue', 'byte2flt',
+ 'byte2flt_hdf', 'calcDayAnomTLL', 'calcMonAnomLLLT',
+ 'calcMonAnomLLT', 'calcMonAnomTLL', 'calcMonAnomTLLL',
+ 'calculate_monthly_values', 'cd_convert', 'changeCase',
+ 'changeCaseChar', 'clmDayTLL', 'clmDayTLLL', 'clmMon2clmDay',
+ 'clmMonLLLT', 'clmMonLLT', 'clmMonTLL', 'clmMonTLLL', 'closest_val',
+ 'copy_VarAtts', 'copy_VarCoords', 'copy_VarCoords_1',
+ 'copy_VarCoords_2', 'copy_VarMeta', 'copyatt', 'crossp3',
+ 'cshstringtolist', 'cssgrid_Wrap', 'dble2flt', 'decimalPlaces',
+ 'delete_VarAtts', 'dim_avg_n_Wrap', 'dim_avg_wgt_n_Wrap',
+ 'dim_avg_wgt_Wrap', 'dim_avg_Wrap', 'dim_cumsum_n_Wrap',
+ 'dim_cumsum_Wrap', 'dim_max_n_Wrap', 'dim_min_n_Wrap',
+ 'dim_rmsd_n_Wrap', 'dim_rmsd_Wrap', 'dim_rmvmean_n_Wrap',
+ 'dim_rmvmean_Wrap', 'dim_rmvmed_n_Wrap', 'dim_rmvmed_Wrap',
+ 'dim_standardize_n_Wrap', 'dim_standardize_Wrap',
+ 'dim_stddev_n_Wrap', 'dim_stddev_Wrap', 'dim_sum_n_Wrap',
+ 'dim_sum_wgt_n_Wrap', 'dim_sum_wgt_Wrap', 'dim_sum_Wrap',
+ 'dim_variance_n_Wrap', 'dim_variance_Wrap', 'dpres_plevel_Wrap',
+ 'dtrend_leftdim', 'dv2uvF_Wrap', 'dv2uvG_Wrap', 'eof_north',
+ 'eofcor_Wrap', 'eofcov_Wrap', 'eofunc_north', 'eofunc_ts_Wrap',
+ 'eofunc_varimax_reorder', 'eofunc_varimax_Wrap', 'eofunc_Wrap',
+ 'epsZero', 'f2fosh_Wrap', 'f2foshv_Wrap', 'f2fsh_Wrap',
+ 'f2fshv_Wrap', 'f2gsh_Wrap', 'f2gshv_Wrap', 'fbindirSwap',
+ 'fbinseqSwap1', 'fbinseqSwap2', 'flt2dble', 'flt2string',
+ 'fo2fsh_Wrap', 'fo2fshv_Wrap', 'g2fsh_Wrap', 'g2fshv_Wrap',
+ 'g2gsh_Wrap', 'g2gshv_Wrap', 'generate_resample_indices',
+ 'generate_sample_indices', 'generate_unique_indices',
+ 'genNormalDist', 'get1Dindex', 'get1Dindex_Collapse',
+ 'get1Dindex_Exclude', 'get_file_suffix', 'GetFillColor',
+ 'GetFillColorIndex', 'getFillValue', 'getind_latlon2d',
+ 'getVarDimNames', 'getVarFillValue', 'grib_stime2itime',
+ 'hyi2hyo_Wrap', 'ilapsF_Wrap', 'ilapsG_Wrap', 'ind_nearest_coord',
+ 'indStrSubset', 'int2dble', 'int2flt', 'int2p_n_Wrap', 'int2p_Wrap',
+ 'isMonotonic', 'isStrSubset', 'latGau', 'latGauWgt', 'latGlobeF',
+ 'latGlobeFo', 'latRegWgt', 'linint1_n_Wrap', 'linint1_Wrap',
+ 'linint2_points_Wrap', 'linint2_Wrap', 'local_max_1d',
+ 'local_min_1d', 'lonFlip', 'lonGlobeF', 'lonGlobeFo', 'lonPivot',
+ 'merge_levels_sfc', 'mod', 'month_to_annual',
+ 'month_to_annual_weighted', 'month_to_season', 'month_to_season12',
+ 'month_to_seasonN', 'monthly_total_to_daily_mean', 'nameDim',
+ 'natgrid_Wrap', 'NewCosWeight', 'niceLatLon2D', 'NormCosWgtGlobe',
+ 'numAsciiCol', 'numAsciiRow', 'numeric2int',
+ 'obj_anal_ic_deprecated', 'obj_anal_ic_Wrap', 'omega_ccm_driver',
+ 'omega_to_w', 'oneDtostring', 'pack_values', 'pattern_cor', 'pdfx',
+ 'pdfxy', 'pdfxy_conform', 'pot_temp', 'pot_vort_hybrid',
+ 'pot_vort_isobaric', 'pres2hybrid_Wrap', 'print_clock',
+ 'printMinMax', 'quadroots', 'rcm2points_Wrap', 'rcm2rgrid_Wrap',
+ 'readAsciiHead', 'readAsciiTable', 'reg_multlin_stats',
+ 'region_ind', 'regline_stats', 'relhum_ttd', 'replaceSingleChar',
+ 'RGBtoCmap', 'rgrid2rcm_Wrap', 'rho_mwjf', 'rm_single_dims',
+ 'rmAnnCycle1D', 'rmInsufData', 'rmMonAnnCycLLLT', 'rmMonAnnCycLLT',
+ 'rmMonAnnCycTLL', 'runave_n_Wrap', 'runave_Wrap', 'short2flt',
+ 'short2flt_hdf', 'shsgc_R42_Wrap', 'sign_f90', 'sign_matlab',
+ 'smth9_Wrap', 'smthClmDayTLL', 'smthClmDayTLLL', 'SqrtCosWeight',
+ 'stat_dispersion', 'static_stability', 'stdMonLLLT', 'stdMonLLT',
+ 'stdMonTLL', 'stdMonTLLL', 'symMinMaxPlt', 'table_attach_columns',
+ 'table_attach_rows', 'time_to_newtime', 'transpose',
+ 'triple2grid_Wrap', 'ut_convert', 'uv2dvF_Wrap', 'uv2dvG_Wrap',
+ 'uv2vrF_Wrap', 'uv2vrG_Wrap', 'vr2uvF_Wrap', 'vr2uvG_Wrap',
+ 'w_to_omega', 'wallClockElapseTime', 'wave_number_spc',
+ 'wgt_areaave_Wrap', 'wgt_runave_leftdim', 'wgt_runave_n_Wrap',
+ 'wgt_runave_Wrap', 'wgt_vertical_n', 'wind_component',
+ 'wind_direction', 'yyyyddd_to_yyyymmdd', 'yyyymm_time',
+ 'yyyymm_to_yyyyfrac', 'yyyymmdd_time', 'yyyymmdd_to_yyyyddd',
+ 'yyyymmdd_to_yyyyfrac', 'yyyymmddhh_time', 'yyyymmddhh_to_yyyyfrac',
+ 'zonal_mpsi_Wrap', 'zonalAve', 'calendar_decode2', 'cd_string',
+ 'kf_filter', 'run_cor', 'time_axis_labels', 'ut_string',
+ 'wrf_contour', 'wrf_map', 'wrf_map_overlay', 'wrf_map_overlays',
+ 'wrf_map_resources', 'wrf_map_zoom', 'wrf_overlay', 'wrf_overlays',
+ 'wrf_user_getvar', 'wrf_user_ij_to_ll', 'wrf_user_intrp2d',
+ 'wrf_user_intrp3d', 'wrf_user_latlon_to_ij', 'wrf_user_list_times',
+ 'wrf_user_ll_to_ij', 'wrf_user_unstagger', 'wrf_user_vert_interp',
+ 'wrf_vector', 'gsn_add_annotation', 'gsn_add_polygon',
+ 'gsn_add_polyline', 'gsn_add_polymarker',
+ 'gsn_add_shapefile_polygons', 'gsn_add_shapefile_polylines',
+ 'gsn_add_shapefile_polymarkers', 'gsn_add_text', 'gsn_attach_plots',
+ 'gsn_blank_plot', 'gsn_contour', 'gsn_contour_map',
+ 'gsn_contour_shade', 'gsn_coordinates', 'gsn_create_labelbar',
+ 'gsn_create_legend', 'gsn_create_text',
+ 'gsn_csm_attach_zonal_means', 'gsn_csm_blank_plot',
+ 'gsn_csm_contour', 'gsn_csm_contour_map', 'gsn_csm_contour_map_ce',
+ 'gsn_csm_contour_map_overlay', 'gsn_csm_contour_map_polar',
+ 'gsn_csm_hov', 'gsn_csm_lat_time', 'gsn_csm_map', 'gsn_csm_map_ce',
+ 'gsn_csm_map_polar', 'gsn_csm_pres_hgt',
+ 'gsn_csm_pres_hgt_streamline', 'gsn_csm_pres_hgt_vector',
+ 'gsn_csm_streamline', 'gsn_csm_streamline_contour_map',
+ 'gsn_csm_streamline_contour_map_ce',
+ 'gsn_csm_streamline_contour_map_polar', 'gsn_csm_streamline_map',
+ 'gsn_csm_streamline_map_ce', 'gsn_csm_streamline_map_polar',
+ 'gsn_csm_streamline_scalar', 'gsn_csm_streamline_scalar_map',
+ 'gsn_csm_streamline_scalar_map_ce',
+ 'gsn_csm_streamline_scalar_map_polar', 'gsn_csm_time_lat',
+ 'gsn_csm_vector', 'gsn_csm_vector_map', 'gsn_csm_vector_map_ce',
+ 'gsn_csm_vector_map_polar', 'gsn_csm_vector_scalar',
+ 'gsn_csm_vector_scalar_map', 'gsn_csm_vector_scalar_map_ce',
+ 'gsn_csm_vector_scalar_map_polar', 'gsn_csm_x2y', 'gsn_csm_x2y2',
+ 'gsn_csm_xy', 'gsn_csm_xy2', 'gsn_csm_xy3', 'gsn_csm_y',
+ 'gsn_define_colormap', 'gsn_draw_colormap', 'gsn_draw_named_colors',
+ 'gsn_histogram', 'gsn_labelbar_ndc', 'gsn_legend_ndc', 'gsn_map',
+ 'gsn_merge_colormaps', 'gsn_open_wks', 'gsn_panel', 'gsn_polygon',
+ 'gsn_polygon_ndc', 'gsn_polyline', 'gsn_polyline_ndc',
+ 'gsn_polymarker', 'gsn_polymarker_ndc', 'gsn_retrieve_colormap',
+ 'gsn_reverse_colormap', 'gsn_streamline', 'gsn_streamline_map',
+ 'gsn_streamline_scalar', 'gsn_streamline_scalar_map', 'gsn_table',
+ 'gsn_text', 'gsn_text_ndc', 'gsn_vector', 'gsn_vector_map',
+ 'gsn_vector_scalar', 'gsn_vector_scalar_map', 'gsn_xy', 'gsn_y',
+ 'hsv2rgb', 'maximize_output', 'namedcolor2rgb', 'namedcolor2rgba',
+ 'reset_device_coordinates', 'span_named_colors'), prefix=r'\b'),
+ Name.Builtin),
+
+ # Resources
+ (words((
+ 'amDataXF', 'amDataYF', 'amJust', 'amOn', 'amOrthogonalPosF',
+ 'amParallelPosF', 'amResizeNotify', 'amSide', 'amTrackData',
+ 'amViewId', 'amZone', 'appDefaultParent', 'appFileSuffix',
+ 'appResources', 'appSysDir', 'appUsrDir', 'caCopyArrays',
+ 'caXArray', 'caXCast', 'caXMaxV', 'caXMinV', 'caXMissingV',
+ 'caYArray', 'caYCast', 'caYMaxV', 'caYMinV', 'caYMissingV',
+ 'cnCellFillEdgeColor', 'cnCellFillMissingValEdgeColor',
+ 'cnConpackParams', 'cnConstFEnableFill', 'cnConstFLabelAngleF',
+ 'cnConstFLabelBackgroundColor', 'cnConstFLabelConstantSpacingF',
+ 'cnConstFLabelFont', 'cnConstFLabelFontAspectF',
+ 'cnConstFLabelFontColor', 'cnConstFLabelFontHeightF',
+ 'cnConstFLabelFontQuality', 'cnConstFLabelFontThicknessF',
+ 'cnConstFLabelFormat', 'cnConstFLabelFuncCode', 'cnConstFLabelJust',
+ 'cnConstFLabelOn', 'cnConstFLabelOrthogonalPosF',
+ 'cnConstFLabelParallelPosF', 'cnConstFLabelPerimColor',
+ 'cnConstFLabelPerimOn', 'cnConstFLabelPerimSpaceF',
+ 'cnConstFLabelPerimThicknessF', 'cnConstFLabelSide',
+ 'cnConstFLabelString', 'cnConstFLabelTextDirection',
+ 'cnConstFLabelZone', 'cnConstFUseInfoLabelRes',
+ 'cnExplicitLabelBarLabelsOn', 'cnExplicitLegendLabelsOn',
+ 'cnExplicitLineLabelsOn', 'cnFillBackgroundColor', 'cnFillColor',
+ 'cnFillColors', 'cnFillDotSizeF', 'cnFillDrawOrder', 'cnFillMode',
+ 'cnFillOn', 'cnFillOpacityF', 'cnFillPalette', 'cnFillPattern',
+ 'cnFillPatterns', 'cnFillScaleF', 'cnFillScales', 'cnFixFillBleed',
+ 'cnGridBoundFillColor', 'cnGridBoundFillPattern',
+ 'cnGridBoundFillScaleF', 'cnGridBoundPerimColor',
+ 'cnGridBoundPerimDashPattern', 'cnGridBoundPerimOn',
+ 'cnGridBoundPerimThicknessF', 'cnHighLabelAngleF',
+ 'cnHighLabelBackgroundColor', 'cnHighLabelConstantSpacingF',
+ 'cnHighLabelCount', 'cnHighLabelFont', 'cnHighLabelFontAspectF',
+ 'cnHighLabelFontColor', 'cnHighLabelFontHeightF',
+ 'cnHighLabelFontQuality', 'cnHighLabelFontThicknessF',
+ 'cnHighLabelFormat', 'cnHighLabelFuncCode', 'cnHighLabelPerimColor',
+ 'cnHighLabelPerimOn', 'cnHighLabelPerimSpaceF',
+ 'cnHighLabelPerimThicknessF', 'cnHighLabelString', 'cnHighLabelsOn',
+ 'cnHighLowLabelOverlapMode', 'cnHighUseLineLabelRes',
+ 'cnInfoLabelAngleF', 'cnInfoLabelBackgroundColor',
+ 'cnInfoLabelConstantSpacingF', 'cnInfoLabelFont',
+ 'cnInfoLabelFontAspectF', 'cnInfoLabelFontColor',
+ 'cnInfoLabelFontHeightF', 'cnInfoLabelFontQuality',
+ 'cnInfoLabelFontThicknessF', 'cnInfoLabelFormat',
+ 'cnInfoLabelFuncCode', 'cnInfoLabelJust', 'cnInfoLabelOn',
+ 'cnInfoLabelOrthogonalPosF', 'cnInfoLabelParallelPosF',
+ 'cnInfoLabelPerimColor', 'cnInfoLabelPerimOn',
+ 'cnInfoLabelPerimSpaceF', 'cnInfoLabelPerimThicknessF',
+ 'cnInfoLabelSide', 'cnInfoLabelString', 'cnInfoLabelTextDirection',
+ 'cnInfoLabelZone', 'cnLabelBarEndLabelsOn', 'cnLabelBarEndStyle',
+ 'cnLabelDrawOrder', 'cnLabelMasking', 'cnLabelScaleFactorF',
+ 'cnLabelScaleValueF', 'cnLabelScalingMode', 'cnLegendLevelFlags',
+ 'cnLevelCount', 'cnLevelFlag', 'cnLevelFlags', 'cnLevelSelectionMode',
+ 'cnLevelSpacingF', 'cnLevels', 'cnLineColor', 'cnLineColors',
+ 'cnLineDashPattern', 'cnLineDashPatterns', 'cnLineDashSegLenF',
+ 'cnLineDrawOrder', 'cnLineLabelAngleF', 'cnLineLabelBackgroundColor',
+ 'cnLineLabelConstantSpacingF', 'cnLineLabelCount',
+ 'cnLineLabelDensityF', 'cnLineLabelFont', 'cnLineLabelFontAspectF',
+ 'cnLineLabelFontColor', 'cnLineLabelFontColors',
+ 'cnLineLabelFontHeightF', 'cnLineLabelFontQuality',
+ 'cnLineLabelFontThicknessF', 'cnLineLabelFormat',
+ 'cnLineLabelFuncCode', 'cnLineLabelInterval', 'cnLineLabelPerimColor',
+ 'cnLineLabelPerimOn', 'cnLineLabelPerimSpaceF',
+ 'cnLineLabelPerimThicknessF', 'cnLineLabelPlacementMode',
+ 'cnLineLabelStrings', 'cnLineLabelsOn', 'cnLinePalette',
+ 'cnLineThicknessF', 'cnLineThicknesses', 'cnLinesOn',
+ 'cnLowLabelAngleF', 'cnLowLabelBackgroundColor',
+ 'cnLowLabelConstantSpacingF', 'cnLowLabelCount', 'cnLowLabelFont',
+ 'cnLowLabelFontAspectF', 'cnLowLabelFontColor',
+ 'cnLowLabelFontHeightF', 'cnLowLabelFontQuality',
+ 'cnLowLabelFontThicknessF', 'cnLowLabelFormat', 'cnLowLabelFuncCode',
+ 'cnLowLabelPerimColor', 'cnLowLabelPerimOn', 'cnLowLabelPerimSpaceF',
+ 'cnLowLabelPerimThicknessF', 'cnLowLabelString', 'cnLowLabelsOn',
+ 'cnLowUseHighLabelRes', 'cnMaxDataValueFormat', 'cnMaxLevelCount',
+ 'cnMaxLevelValF', 'cnMaxPointDistanceF', 'cnMinLevelValF',
+ 'cnMissingValFillColor', 'cnMissingValFillPattern',
+ 'cnMissingValFillScaleF', 'cnMissingValPerimColor',
+ 'cnMissingValPerimDashPattern', 'cnMissingValPerimGridBoundOn',
+ 'cnMissingValPerimOn', 'cnMissingValPerimThicknessF',
+ 'cnMonoFillColor', 'cnMonoFillPattern', 'cnMonoFillScale',
+ 'cnMonoLevelFlag', 'cnMonoLineColor', 'cnMonoLineDashPattern',
+ 'cnMonoLineLabelFontColor', 'cnMonoLineThickness', 'cnNoDataLabelOn',
+ 'cnNoDataLabelString', 'cnOutOfRangeFillColor',
+ 'cnOutOfRangeFillPattern', 'cnOutOfRangeFillScaleF',
+ 'cnOutOfRangePerimColor', 'cnOutOfRangePerimDashPattern',
+ 'cnOutOfRangePerimOn', 'cnOutOfRangePerimThicknessF',
+ 'cnRasterCellSizeF', 'cnRasterMinCellSizeF', 'cnRasterModeOn',
+ 'cnRasterSampleFactorF', 'cnRasterSmoothingOn', 'cnScalarFieldData',
+ 'cnSmoothingDistanceF', 'cnSmoothingOn', 'cnSmoothingTensionF',
+ 'cnSpanFillPalette', 'cnSpanLinePalette', 'ctCopyTables',
+ 'ctXElementSize', 'ctXMaxV', 'ctXMinV', 'ctXMissingV', 'ctXTable',
+ 'ctXTableLengths', 'ctXTableType', 'ctYElementSize', 'ctYMaxV',
+ 'ctYMinV', 'ctYMissingV', 'ctYTable', 'ctYTableLengths',
+ 'ctYTableType', 'dcDelayCompute', 'errBuffer',
+ 'errFileName', 'errFilePtr', 'errLevel', 'errPrint', 'errUnitNumber',
+ 'gsClipOn', 'gsColors', 'gsEdgeColor', 'gsEdgeDashPattern',
+ 'gsEdgeDashSegLenF', 'gsEdgeThicknessF', 'gsEdgesOn',
+ 'gsFillBackgroundColor', 'gsFillColor', 'gsFillDotSizeF',
+ 'gsFillIndex', 'gsFillLineThicknessF', 'gsFillOpacityF',
+ 'gsFillScaleF', 'gsFont', 'gsFontAspectF', 'gsFontColor',
+ 'gsFontHeightF', 'gsFontOpacityF', 'gsFontQuality',
+ 'gsFontThicknessF', 'gsLineColor', 'gsLineDashPattern',
+ 'gsLineDashSegLenF', 'gsLineLabelConstantSpacingF', 'gsLineLabelFont',
+ 'gsLineLabelFontAspectF', 'gsLineLabelFontColor',
+ 'gsLineLabelFontHeightF', 'gsLineLabelFontQuality',
+ 'gsLineLabelFontThicknessF', 'gsLineLabelFuncCode',
+ 'gsLineLabelString', 'gsLineOpacityF', 'gsLineThicknessF',
+ 'gsMarkerColor', 'gsMarkerIndex', 'gsMarkerOpacityF', 'gsMarkerSizeF',
+ 'gsMarkerThicknessF', 'gsSegments', 'gsTextAngleF',
+ 'gsTextConstantSpacingF', 'gsTextDirection', 'gsTextFuncCode',
+ 'gsTextJustification', 'gsnAboveYRefLineBarColors',
+ 'gsnAboveYRefLineBarFillScales', 'gsnAboveYRefLineBarPatterns',
+ 'gsnAboveYRefLineColor', 'gsnAddCyclic', 'gsnAttachBorderOn',
+ 'gsnAttachPlotsXAxis', 'gsnBelowYRefLineBarColors',
+ 'gsnBelowYRefLineBarFillScales', 'gsnBelowYRefLineBarPatterns',
+ 'gsnBelowYRefLineColor', 'gsnBoxMargin', 'gsnCenterString',
+ 'gsnCenterStringFontColor', 'gsnCenterStringFontHeightF',
+ 'gsnCenterStringFuncCode', 'gsnCenterStringOrthogonalPosF',
+ 'gsnCenterStringParallelPosF', 'gsnContourLineThicknessesScale',
+ 'gsnContourNegLineDashPattern', 'gsnContourPosLineDashPattern',
+ 'gsnContourZeroLineThicknessF', 'gsnDebugWriteFileName', 'gsnDraw',
+ 'gsnFrame', 'gsnHistogramBarWidthPercent', 'gsnHistogramBinIntervals',
+ 'gsnHistogramBinMissing', 'gsnHistogramBinWidth',
+ 'gsnHistogramClassIntervals', 'gsnHistogramCompare',
+ 'gsnHistogramComputePercentages',
+ 'gsnHistogramComputePercentagesNoMissing',
+ 'gsnHistogramDiscreteBinValues', 'gsnHistogramDiscreteClassValues',
+ 'gsnHistogramHorizontal', 'gsnHistogramMinMaxBinsOn',
+ 'gsnHistogramNumberOfBins', 'gsnHistogramPercentSign',
+ 'gsnHistogramSelectNiceIntervals', 'gsnLeftString',
+ 'gsnLeftStringFontColor', 'gsnLeftStringFontHeightF',
+ 'gsnLeftStringFuncCode', 'gsnLeftStringOrthogonalPosF',
+ 'gsnLeftStringParallelPosF', 'gsnMajorLatSpacing',
+ 'gsnMajorLonSpacing', 'gsnMaskLambertConformal',
+ 'gsnMaskLambertConformalOutlineOn', 'gsnMaximize',
+ 'gsnMinorLatSpacing', 'gsnMinorLonSpacing', 'gsnPanelBottom',
+ 'gsnPanelCenter', 'gsnPanelDebug', 'gsnPanelFigureStrings',
+ 'gsnPanelFigureStringsBackgroundFillColor',
+ 'gsnPanelFigureStringsFontHeightF', 'gsnPanelFigureStringsJust',
+ 'gsnPanelFigureStringsPerimOn', 'gsnPanelLabelBar', 'gsnPanelLeft',
+ 'gsnPanelMainFont', 'gsnPanelMainFontColor',
+ 'gsnPanelMainFontHeightF', 'gsnPanelMainString', 'gsnPanelRight',
+ 'gsnPanelRowSpec', 'gsnPanelScalePlotIndex', 'gsnPanelTop',
+ 'gsnPanelXF', 'gsnPanelXWhiteSpacePercent', 'gsnPanelYF',
+ 'gsnPanelYWhiteSpacePercent', 'gsnPaperHeight', 'gsnPaperMargin',
+ 'gsnPaperOrientation', 'gsnPaperWidth', 'gsnPolar',
+ 'gsnPolarLabelDistance', 'gsnPolarLabelFont',
+ 'gsnPolarLabelFontHeightF', 'gsnPolarLabelSpacing', 'gsnPolarTime',
+ 'gsnPolarUT', 'gsnRightString', 'gsnRightStringFontColor',
+ 'gsnRightStringFontHeightF', 'gsnRightStringFuncCode',
+ 'gsnRightStringOrthogonalPosF', 'gsnRightStringParallelPosF',
+ 'gsnScalarContour', 'gsnScale', 'gsnShape', 'gsnSpreadColorEnd',
+ 'gsnSpreadColorStart', 'gsnSpreadColors', 'gsnStringFont',
+ 'gsnStringFontColor', 'gsnStringFontHeightF', 'gsnStringFuncCode',
+ 'gsnTickMarksOn', 'gsnXAxisIrregular2Linear', 'gsnXAxisIrregular2Log',
+ 'gsnXRefLine', 'gsnXRefLineColor', 'gsnXRefLineDashPattern',
+ 'gsnXRefLineThicknessF', 'gsnXYAboveFillColors', 'gsnXYBarChart',
+ 'gsnXYBarChartBarWidth', 'gsnXYBarChartColors',
+ 'gsnXYBarChartColors2', 'gsnXYBarChartFillDotSizeF',
+ 'gsnXYBarChartFillLineThicknessF', 'gsnXYBarChartFillOpacityF',
+ 'gsnXYBarChartFillScaleF', 'gsnXYBarChartOutlineOnly',
+ 'gsnXYBarChartOutlineThicknessF', 'gsnXYBarChartPatterns',
+ 'gsnXYBarChartPatterns2', 'gsnXYBelowFillColors', 'gsnXYFillColors',
+ 'gsnXYFillOpacities', 'gsnXYLeftFillColors', 'gsnXYRightFillColors',
+ 'gsnYAxisIrregular2Linear', 'gsnYAxisIrregular2Log', 'gsnYRefLine',
+ 'gsnYRefLineColor', 'gsnYRefLineColors', 'gsnYRefLineDashPattern',
+ 'gsnYRefLineDashPatterns', 'gsnYRefLineThicknessF',
+ 'gsnYRefLineThicknesses', 'gsnZonalMean', 'gsnZonalMeanXMaxF',
+ 'gsnZonalMeanXMinF', 'gsnZonalMeanYRefLine', 'lbAutoManage',
+ 'lbBottomMarginF', 'lbBoxCount', 'lbBoxEndCapStyle', 'lbBoxFractions',
+ 'lbBoxLineColor', 'lbBoxLineDashPattern', 'lbBoxLineDashSegLenF',
+ 'lbBoxLineThicknessF', 'lbBoxLinesOn', 'lbBoxMajorExtentF',
+ 'lbBoxMinorExtentF', 'lbBoxSeparatorLinesOn', 'lbBoxSizing',
+ 'lbFillBackground', 'lbFillColor', 'lbFillColors', 'lbFillDotSizeF',
+ 'lbFillLineThicknessF', 'lbFillPattern', 'lbFillPatterns',
+ 'lbFillScaleF', 'lbFillScales', 'lbJustification', 'lbLabelAlignment',
+ 'lbLabelAngleF', 'lbLabelAutoStride', 'lbLabelBarOn',
+ 'lbLabelConstantSpacingF', 'lbLabelDirection', 'lbLabelFont',
+ 'lbLabelFontAspectF', 'lbLabelFontColor', 'lbLabelFontHeightF',
+ 'lbLabelFontQuality', 'lbLabelFontThicknessF', 'lbLabelFuncCode',
+ 'lbLabelJust', 'lbLabelOffsetF', 'lbLabelPosition', 'lbLabelStride',
+ 'lbLabelStrings', 'lbLabelsOn', 'lbLeftMarginF', 'lbMaxLabelLenF',
+ 'lbMinLabelSpacingF', 'lbMonoFillColor', 'lbMonoFillPattern',
+ 'lbMonoFillScale', 'lbOrientation', 'lbPerimColor',
+ 'lbPerimDashPattern', 'lbPerimDashSegLenF', 'lbPerimFill',
+ 'lbPerimFillColor', 'lbPerimOn', 'lbPerimThicknessF',
+ 'lbRasterFillOn', 'lbRightMarginF', 'lbTitleAngleF',
+ 'lbTitleConstantSpacingF', 'lbTitleDirection', 'lbTitleExtentF',
+ 'lbTitleFont', 'lbTitleFontAspectF', 'lbTitleFontColor',
+ 'lbTitleFontHeightF', 'lbTitleFontQuality', 'lbTitleFontThicknessF',
+ 'lbTitleFuncCode', 'lbTitleJust', 'lbTitleOffsetF', 'lbTitleOn',
+ 'lbTitlePosition', 'lbTitleString', 'lbTopMarginF', 'lgAutoManage',
+ 'lgBottomMarginF', 'lgBoxBackground', 'lgBoxLineColor',
+ 'lgBoxLineDashPattern', 'lgBoxLineDashSegLenF', 'lgBoxLineThicknessF',
+ 'lgBoxLinesOn', 'lgBoxMajorExtentF', 'lgBoxMinorExtentF',
+ 'lgDashIndex', 'lgDashIndexes', 'lgItemCount', 'lgItemOrder',
+ 'lgItemPlacement', 'lgItemPositions', 'lgItemType', 'lgItemTypes',
+ 'lgJustification', 'lgLabelAlignment', 'lgLabelAngleF',
+ 'lgLabelAutoStride', 'lgLabelConstantSpacingF', 'lgLabelDirection',
+ 'lgLabelFont', 'lgLabelFontAspectF', 'lgLabelFontColor',
+ 'lgLabelFontHeightF', 'lgLabelFontQuality', 'lgLabelFontThicknessF',
+ 'lgLabelFuncCode', 'lgLabelJust', 'lgLabelOffsetF', 'lgLabelPosition',
+ 'lgLabelStride', 'lgLabelStrings', 'lgLabelsOn', 'lgLeftMarginF',
+ 'lgLegendOn', 'lgLineColor', 'lgLineColors', 'lgLineDashSegLenF',
+ 'lgLineDashSegLens', 'lgLineLabelConstantSpacingF', 'lgLineLabelFont',
+ 'lgLineLabelFontAspectF', 'lgLineLabelFontColor',
+ 'lgLineLabelFontColors', 'lgLineLabelFontHeightF',
+ 'lgLineLabelFontHeights', 'lgLineLabelFontQuality',
+ 'lgLineLabelFontThicknessF', 'lgLineLabelFuncCode',
+ 'lgLineLabelStrings', 'lgLineLabelsOn', 'lgLineThicknessF',
+ 'lgLineThicknesses', 'lgMarkerColor', 'lgMarkerColors',
+ 'lgMarkerIndex', 'lgMarkerIndexes', 'lgMarkerSizeF', 'lgMarkerSizes',
+ 'lgMarkerThicknessF', 'lgMarkerThicknesses', 'lgMonoDashIndex',
+ 'lgMonoItemType', 'lgMonoLineColor', 'lgMonoLineDashSegLen',
+ 'lgMonoLineLabelFontColor', 'lgMonoLineLabelFontHeight',
+ 'lgMonoLineThickness', 'lgMonoMarkerColor', 'lgMonoMarkerIndex',
+ 'lgMonoMarkerSize', 'lgMonoMarkerThickness', 'lgOrientation',
+ 'lgPerimColor', 'lgPerimDashPattern', 'lgPerimDashSegLenF',
+ 'lgPerimFill', 'lgPerimFillColor', 'lgPerimOn', 'lgPerimThicknessF',
+ 'lgRightMarginF', 'lgTitleAngleF', 'lgTitleConstantSpacingF',
+ 'lgTitleDirection', 'lgTitleExtentF', 'lgTitleFont',
+ 'lgTitleFontAspectF', 'lgTitleFontColor', 'lgTitleFontHeightF',
+ 'lgTitleFontQuality', 'lgTitleFontThicknessF', 'lgTitleFuncCode',
+ 'lgTitleJust', 'lgTitleOffsetF', 'lgTitleOn', 'lgTitlePosition',
+ 'lgTitleString', 'lgTopMarginF', 'mpAreaGroupCount',
+ 'mpAreaMaskingOn', 'mpAreaNames', 'mpAreaTypes', 'mpBottomAngleF',
+ 'mpBottomMapPosF', 'mpBottomNDCF', 'mpBottomNPCF',
+ 'mpBottomPointLatF', 'mpBottomPointLonF', 'mpBottomWindowF',
+ 'mpCenterLatF', 'mpCenterLonF', 'mpCenterRotF', 'mpCountyLineColor',
+ 'mpCountyLineDashPattern', 'mpCountyLineDashSegLenF',
+ 'mpCountyLineThicknessF', 'mpDataBaseVersion', 'mpDataResolution',
+ 'mpDataSetName', 'mpDefaultFillColor', 'mpDefaultFillPattern',
+ 'mpDefaultFillScaleF', 'mpDynamicAreaGroups', 'mpEllipticalBoundary',
+ 'mpFillAreaSpecifiers', 'mpFillBoundarySets', 'mpFillColor',
+ 'mpFillColors', 'mpFillColors-default', 'mpFillDotSizeF',
+ 'mpFillDrawOrder', 'mpFillOn', 'mpFillPatternBackground',
+ 'mpFillPattern', 'mpFillPatterns', 'mpFillPatterns-default',
+ 'mpFillScaleF', 'mpFillScales', 'mpFillScales-default',
+ 'mpFixedAreaGroups', 'mpGeophysicalLineColor',
+ 'mpGeophysicalLineDashPattern', 'mpGeophysicalLineDashSegLenF',
+ 'mpGeophysicalLineThicknessF', 'mpGreatCircleLinesOn',
+ 'mpGridAndLimbDrawOrder', 'mpGridAndLimbOn', 'mpGridLatSpacingF',
+ 'mpGridLineColor', 'mpGridLineDashPattern', 'mpGridLineDashSegLenF',
+ 'mpGridLineThicknessF', 'mpGridLonSpacingF', 'mpGridMaskMode',
+ 'mpGridMaxLatF', 'mpGridPolarLonSpacingF', 'mpGridSpacingF',
+ 'mpInlandWaterFillColor', 'mpInlandWaterFillPattern',
+ 'mpInlandWaterFillScaleF', 'mpLabelDrawOrder', 'mpLabelFontColor',
+ 'mpLabelFontHeightF', 'mpLabelsOn', 'mpLambertMeridianF',
+ 'mpLambertParallel1F', 'mpLambertParallel2F', 'mpLandFillColor',
+ 'mpLandFillPattern', 'mpLandFillScaleF', 'mpLeftAngleF',
+ 'mpLeftCornerLatF', 'mpLeftCornerLonF', 'mpLeftMapPosF',
+ 'mpLeftNDCF', 'mpLeftNPCF', 'mpLeftPointLatF',
+ 'mpLeftPointLonF', 'mpLeftWindowF', 'mpLimbLineColor',
+ 'mpLimbLineDashPattern', 'mpLimbLineDashSegLenF',
+ 'mpLimbLineThicknessF', 'mpLimitMode', 'mpMaskAreaSpecifiers',
+ 'mpMaskOutlineSpecifiers', 'mpMaxLatF', 'mpMaxLonF',
+ 'mpMinLatF', 'mpMinLonF', 'mpMonoFillColor', 'mpMonoFillPattern',
+ 'mpMonoFillScale', 'mpNationalLineColor', 'mpNationalLineDashPattern',
+ 'mpNationalLineThicknessF', 'mpOceanFillColor', 'mpOceanFillPattern',
+ 'mpOceanFillScaleF', 'mpOutlineBoundarySets', 'mpOutlineDrawOrder',
+ 'mpOutlineMaskingOn', 'mpOutlineOn', 'mpOutlineSpecifiers',
+ 'mpPerimDrawOrder', 'mpPerimLineColor', 'mpPerimLineDashPattern',
+ 'mpPerimLineDashSegLenF', 'mpPerimLineThicknessF', 'mpPerimOn',
+ 'mpPolyMode', 'mpProjection', 'mpProvincialLineColor',
+ 'mpProvincialLineDashPattern', 'mpProvincialLineDashSegLenF',
+ 'mpProvincialLineThicknessF', 'mpRelativeCenterLat',
+ 'mpRelativeCenterLon', 'mpRightAngleF', 'mpRightCornerLatF',
+ 'mpRightCornerLonF', 'mpRightMapPosF', 'mpRightNDCF',
+ 'mpRightNPCF', 'mpRightPointLatF', 'mpRightPointLonF',
+ 'mpRightWindowF', 'mpSatelliteAngle1F', 'mpSatelliteAngle2F',
+ 'mpSatelliteDistF', 'mpShapeMode', 'mpSpecifiedFillColors',
+ 'mpSpecifiedFillDirectIndexing', 'mpSpecifiedFillPatterns',
+ 'mpSpecifiedFillPriority', 'mpSpecifiedFillScales',
+ 'mpTopAngleF', 'mpTopMapPosF', 'mpTopNDCF', 'mpTopNPCF',
+ 'mpTopPointLatF', 'mpTopPointLonF', 'mpTopWindowF',
+ 'mpUSStateLineColor', 'mpUSStateLineDashPattern',
+ 'mpUSStateLineDashSegLenF', 'mpUSStateLineThicknessF',
+ 'pmAnnoManagers', 'pmAnnoViews', 'pmLabelBarDisplayMode',
+ 'pmLabelBarHeightF', 'pmLabelBarKeepAspect', 'pmLabelBarOrthogonalPosF',
+ 'pmLabelBarParallelPosF', 'pmLabelBarSide', 'pmLabelBarWidthF',
+ 'pmLabelBarZone', 'pmLegendDisplayMode', 'pmLegendHeightF',
+ 'pmLegendKeepAspect', 'pmLegendOrthogonalPosF',
+ 'pmLegendParallelPosF', 'pmLegendSide', 'pmLegendWidthF',
+ 'pmLegendZone', 'pmOverlaySequenceIds', 'pmTickMarkDisplayMode',
+ 'pmTickMarkZone', 'pmTitleDisplayMode', 'pmTitleZone',
+ 'prGraphicStyle', 'prPolyType', 'prXArray', 'prYArray',
+ 'sfCopyData', 'sfDataArray', 'sfDataMaxV', 'sfDataMinV',
+ 'sfElementNodes', 'sfExchangeDimensions', 'sfFirstNodeIndex',
+ 'sfMissingValueV', 'sfXArray', 'sfXCActualEndF', 'sfXCActualStartF',
+ 'sfXCEndIndex', 'sfXCEndSubsetV', 'sfXCEndV', 'sfXCStartIndex',
+ 'sfXCStartSubsetV', 'sfXCStartV', 'sfXCStride', 'sfXCellBounds',
+ 'sfYArray', 'sfYCActualEndF', 'sfYCActualStartF', 'sfYCEndIndex',
+ 'sfYCEndSubsetV', 'sfYCEndV', 'sfYCStartIndex', 'sfYCStartSubsetV',
+ 'sfYCStartV', 'sfYCStride', 'sfYCellBounds', 'stArrowLengthF',
+ 'stArrowStride', 'stCrossoverCheckCount',
+ 'stExplicitLabelBarLabelsOn', 'stLabelBarEndLabelsOn',
+ 'stLabelFormat', 'stLengthCheckCount', 'stLevelColors',
+ 'stLevelCount', 'stLevelPalette', 'stLevelSelectionMode',
+ 'stLevelSpacingF', 'stLevels', 'stLineColor', 'stLineOpacityF',
+ 'stLineStartStride', 'stLineThicknessF', 'stMapDirection',
+ 'stMaxLevelCount', 'stMaxLevelValF', 'stMinArrowSpacingF',
+ 'stMinDistanceF', 'stMinLevelValF', 'stMinLineSpacingF',
+ 'stMinStepFactorF', 'stMonoLineColor', 'stNoDataLabelOn',
+ 'stNoDataLabelString', 'stScalarFieldData', 'stScalarMissingValColor',
+ 'stSpanLevelPalette', 'stStepSizeF', 'stStreamlineDrawOrder',
+ 'stUseScalarArray', 'stVectorFieldData', 'stZeroFLabelAngleF',
+ 'stZeroFLabelBackgroundColor', 'stZeroFLabelConstantSpacingF',
+ 'stZeroFLabelFont', 'stZeroFLabelFontAspectF',
+ 'stZeroFLabelFontColor', 'stZeroFLabelFontHeightF',
+ 'stZeroFLabelFontQuality', 'stZeroFLabelFontThicknessF',
+ 'stZeroFLabelFuncCode', 'stZeroFLabelJust', 'stZeroFLabelOn',
+ 'stZeroFLabelOrthogonalPosF', 'stZeroFLabelParallelPosF',
+ 'stZeroFLabelPerimColor', 'stZeroFLabelPerimOn',
+ 'stZeroFLabelPerimSpaceF', 'stZeroFLabelPerimThicknessF',
+ 'stZeroFLabelSide', 'stZeroFLabelString', 'stZeroFLabelTextDirection',
+ 'stZeroFLabelZone', 'tfDoNDCOverlay', 'tfPlotManagerOn',
+ 'tfPolyDrawList', 'tfPolyDrawOrder', 'tiDeltaF', 'tiMainAngleF',
+ 'tiMainConstantSpacingF', 'tiMainDirection', 'tiMainFont',
+ 'tiMainFontAspectF', 'tiMainFontColor', 'tiMainFontHeightF',
+ 'tiMainFontQuality', 'tiMainFontThicknessF', 'tiMainFuncCode',
+ 'tiMainJust', 'tiMainOffsetXF', 'tiMainOffsetYF', 'tiMainOn',
+ 'tiMainPosition', 'tiMainSide', 'tiMainString', 'tiUseMainAttributes',
+ 'tiXAxisAngleF', 'tiXAxisConstantSpacingF', 'tiXAxisDirection',
+ 'tiXAxisFont', 'tiXAxisFontAspectF', 'tiXAxisFontColor',
+ 'tiXAxisFontHeightF', 'tiXAxisFontQuality', 'tiXAxisFontThicknessF',
+ 'tiXAxisFuncCode', 'tiXAxisJust', 'tiXAxisOffsetXF',
+ 'tiXAxisOffsetYF', 'tiXAxisOn', 'tiXAxisPosition', 'tiXAxisSide',
+ 'tiXAxisString', 'tiYAxisAngleF', 'tiYAxisConstantSpacingF',
+ 'tiYAxisDirection', 'tiYAxisFont', 'tiYAxisFontAspectF',
+ 'tiYAxisFontColor', 'tiYAxisFontHeightF', 'tiYAxisFontQuality',
+ 'tiYAxisFontThicknessF', 'tiYAxisFuncCode', 'tiYAxisJust',
+ 'tiYAxisOffsetXF', 'tiYAxisOffsetYF', 'tiYAxisOn', 'tiYAxisPosition',
+ 'tiYAxisSide', 'tiYAxisString', 'tmBorderLineColor',
+ 'tmBorderThicknessF', 'tmEqualizeXYSizes', 'tmLabelAutoStride',
+ 'tmSciNoteCutoff', 'tmXBAutoPrecision', 'tmXBBorderOn',
+ 'tmXBDataLeftF', 'tmXBDataRightF', 'tmXBFormat', 'tmXBIrrTensionF',
+ 'tmXBIrregularPoints', 'tmXBLabelAngleF', 'tmXBLabelConstantSpacingF',
+ 'tmXBLabelDeltaF', 'tmXBLabelDirection', 'tmXBLabelFont',
+ 'tmXBLabelFontAspectF', 'tmXBLabelFontColor', 'tmXBLabelFontHeightF',
+ 'tmXBLabelFontQuality', 'tmXBLabelFontThicknessF',
+ 'tmXBLabelFuncCode', 'tmXBLabelJust', 'tmXBLabelStride', 'tmXBLabels',
+ 'tmXBLabelsOn', 'tmXBMajorLengthF', 'tmXBMajorLineColor',
+ 'tmXBMajorOutwardLengthF', 'tmXBMajorThicknessF', 'tmXBMaxLabelLenF',
+ 'tmXBMaxTicks', 'tmXBMinLabelSpacingF', 'tmXBMinorLengthF',
+ 'tmXBMinorLineColor', 'tmXBMinorOn', 'tmXBMinorOutwardLengthF',
+ 'tmXBMinorPerMajor', 'tmXBMinorThicknessF', 'tmXBMinorValues',
+ 'tmXBMode', 'tmXBOn', 'tmXBPrecision', 'tmXBStyle', 'tmXBTickEndF',
+ 'tmXBTickSpacingF', 'tmXBTickStartF', 'tmXBValues', 'tmXMajorGrid',
+ 'tmXMajorGridLineColor', 'tmXMajorGridLineDashPattern',
+ 'tmXMajorGridThicknessF', 'tmXMinorGrid', 'tmXMinorGridLineColor',
+ 'tmXMinorGridLineDashPattern', 'tmXMinorGridThicknessF',
+ 'tmXTAutoPrecision', 'tmXTBorderOn', 'tmXTDataLeftF',
+ 'tmXTDataRightF', 'tmXTFormat', 'tmXTIrrTensionF',
+ 'tmXTIrregularPoints', 'tmXTLabelAngleF', 'tmXTLabelConstantSpacingF',
+ 'tmXTLabelDeltaF', 'tmXTLabelDirection', 'tmXTLabelFont',
+ 'tmXTLabelFontAspectF', 'tmXTLabelFontColor', 'tmXTLabelFontHeightF',
+ 'tmXTLabelFontQuality', 'tmXTLabelFontThicknessF',
+ 'tmXTLabelFuncCode', 'tmXTLabelJust', 'tmXTLabelStride', 'tmXTLabels',
+ 'tmXTLabelsOn', 'tmXTMajorLengthF', 'tmXTMajorLineColor',
+ 'tmXTMajorOutwardLengthF', 'tmXTMajorThicknessF', 'tmXTMaxLabelLenF',
+ 'tmXTMaxTicks', 'tmXTMinLabelSpacingF', 'tmXTMinorLengthF',
+ 'tmXTMinorLineColor', 'tmXTMinorOn', 'tmXTMinorOutwardLengthF',
+ 'tmXTMinorPerMajor', 'tmXTMinorThicknessF', 'tmXTMinorValues',
+ 'tmXTMode', 'tmXTOn', 'tmXTPrecision', 'tmXTStyle', 'tmXTTickEndF',
+ 'tmXTTickSpacingF', 'tmXTTickStartF', 'tmXTValues', 'tmXUseBottom',
+ 'tmYLAutoPrecision', 'tmYLBorderOn', 'tmYLDataBottomF',
+ 'tmYLDataTopF', 'tmYLFormat', 'tmYLIrrTensionF',
+ 'tmYLIrregularPoints', 'tmYLLabelAngleF', 'tmYLLabelConstantSpacingF',
+ 'tmYLLabelDeltaF', 'tmYLLabelDirection', 'tmYLLabelFont',
+ 'tmYLLabelFontAspectF', 'tmYLLabelFontColor', 'tmYLLabelFontHeightF',
+ 'tmYLLabelFontQuality', 'tmYLLabelFontThicknessF',
+ 'tmYLLabelFuncCode', 'tmYLLabelJust', 'tmYLLabelStride', 'tmYLLabels',
+ 'tmYLLabelsOn', 'tmYLMajorLengthF', 'tmYLMajorLineColor',
+ 'tmYLMajorOutwardLengthF', 'tmYLMajorThicknessF', 'tmYLMaxLabelLenF',
+ 'tmYLMaxTicks', 'tmYLMinLabelSpacingF', 'tmYLMinorLengthF',
+ 'tmYLMinorLineColor', 'tmYLMinorOn', 'tmYLMinorOutwardLengthF',
+ 'tmYLMinorPerMajor', 'tmYLMinorThicknessF', 'tmYLMinorValues',
+ 'tmYLMode', 'tmYLOn', 'tmYLPrecision', 'tmYLStyle', 'tmYLTickEndF',
+ 'tmYLTickSpacingF', 'tmYLTickStartF', 'tmYLValues', 'tmYMajorGrid',
+ 'tmYMajorGridLineColor', 'tmYMajorGridLineDashPattern',
+ 'tmYMajorGridThicknessF', 'tmYMinorGrid', 'tmYMinorGridLineColor',
+ 'tmYMinorGridLineDashPattern', 'tmYMinorGridThicknessF',
+ 'tmYRAutoPrecision', 'tmYRBorderOn', 'tmYRDataBottomF',
+ 'tmYRDataTopF', 'tmYRFormat', 'tmYRIrrTensionF',
+ 'tmYRIrregularPoints', 'tmYRLabelAngleF', 'tmYRLabelConstantSpacingF',
+ 'tmYRLabelDeltaF', 'tmYRLabelDirection', 'tmYRLabelFont',
+ 'tmYRLabelFontAspectF', 'tmYRLabelFontColor', 'tmYRLabelFontHeightF',
+ 'tmYRLabelFontQuality', 'tmYRLabelFontThicknessF',
+ 'tmYRLabelFuncCode', 'tmYRLabelJust', 'tmYRLabelStride', 'tmYRLabels',
+ 'tmYRLabelsOn', 'tmYRMajorLengthF', 'tmYRMajorLineColor',
+ 'tmYRMajorOutwardLengthF', 'tmYRMajorThicknessF', 'tmYRMaxLabelLenF',
+ 'tmYRMaxTicks', 'tmYRMinLabelSpacingF', 'tmYRMinorLengthF',
+ 'tmYRMinorLineColor', 'tmYRMinorOn', 'tmYRMinorOutwardLengthF',
+ 'tmYRMinorPerMajor', 'tmYRMinorThicknessF', 'tmYRMinorValues',
+ 'tmYRMode', 'tmYROn', 'tmYRPrecision', 'tmYRStyle', 'tmYRTickEndF',
+ 'tmYRTickSpacingF', 'tmYRTickStartF', 'tmYRValues', 'tmYUseLeft',
+ 'trGridType', 'trLineInterpolationOn',
+ 'trXAxisType', 'trXCoordPoints', 'trXInterPoints', 'trXLog',
+ 'trXMaxF', 'trXMinF', 'trXReverse', 'trXSamples', 'trXTensionF',
+ 'trYAxisType', 'trYCoordPoints', 'trYInterPoints', 'trYLog',
+ 'trYMaxF', 'trYMinF', 'trYReverse', 'trYSamples', 'trYTensionF',
+ 'txAngleF', 'txBackgroundFillColor', 'txConstantSpacingF', 'txDirection',
+ 'txFont', 'HLU-Fonts', 'txFontAspectF', 'txFontColor',
+ 'txFontHeightF', 'txFontOpacityF', 'txFontQuality',
+ 'txFontThicknessF', 'txFuncCode', 'txJust', 'txPerimColor',
+ 'txPerimDashLengthF', 'txPerimDashPattern', 'txPerimOn',
+ 'txPerimSpaceF', 'txPerimThicknessF', 'txPosXF', 'txPosYF',
+ 'txString', 'vcExplicitLabelBarLabelsOn', 'vcFillArrowEdgeColor',
+ 'vcFillArrowEdgeThicknessF', 'vcFillArrowFillColor',
+ 'vcFillArrowHeadInteriorXF', 'vcFillArrowHeadMinFracXF',
+ 'vcFillArrowHeadMinFracYF', 'vcFillArrowHeadXF', 'vcFillArrowHeadYF',
+ 'vcFillArrowMinFracWidthF', 'vcFillArrowWidthF', 'vcFillArrowsOn',
+ 'vcFillOverEdge', 'vcGlyphOpacityF', 'vcGlyphStyle',
+ 'vcLabelBarEndLabelsOn', 'vcLabelFontColor', 'vcLabelFontHeightF',
+ 'vcLabelsOn', 'vcLabelsUseVectorColor', 'vcLevelColors',
+ 'vcLevelCount', 'vcLevelPalette', 'vcLevelSelectionMode',
+ 'vcLevelSpacingF', 'vcLevels', 'vcLineArrowColor',
+ 'vcLineArrowHeadMaxSizeF', 'vcLineArrowHeadMinSizeF',
+ 'vcLineArrowThicknessF', 'vcMagnitudeFormat',
+ 'vcMagnitudeScaleFactorF', 'vcMagnitudeScaleValueF',
+ 'vcMagnitudeScalingMode', 'vcMapDirection', 'vcMaxLevelCount',
+ 'vcMaxLevelValF', 'vcMaxMagnitudeF', 'vcMinAnnoAngleF',
+ 'vcMinAnnoArrowAngleF', 'vcMinAnnoArrowEdgeColor',
+ 'vcMinAnnoArrowFillColor', 'vcMinAnnoArrowLineColor',
+ 'vcMinAnnoArrowMinOffsetF', 'vcMinAnnoArrowSpaceF',
+ 'vcMinAnnoArrowUseVecColor', 'vcMinAnnoBackgroundColor',
+ 'vcMinAnnoConstantSpacingF', 'vcMinAnnoExplicitMagnitudeF',
+ 'vcMinAnnoFont', 'vcMinAnnoFontAspectF', 'vcMinAnnoFontColor',
+ 'vcMinAnnoFontHeightF', 'vcMinAnnoFontQuality',
+ 'vcMinAnnoFontThicknessF', 'vcMinAnnoFuncCode', 'vcMinAnnoJust',
+ 'vcMinAnnoOn', 'vcMinAnnoOrientation', 'vcMinAnnoOrthogonalPosF',
+ 'vcMinAnnoParallelPosF', 'vcMinAnnoPerimColor', 'vcMinAnnoPerimOn',
+ 'vcMinAnnoPerimSpaceF', 'vcMinAnnoPerimThicknessF', 'vcMinAnnoSide',
+ 'vcMinAnnoString1', 'vcMinAnnoString1On', 'vcMinAnnoString2',
+ 'vcMinAnnoString2On', 'vcMinAnnoTextDirection', 'vcMinAnnoZone',
+ 'vcMinDistanceF', 'vcMinFracLengthF', 'vcMinLevelValF',
+ 'vcMinMagnitudeF', 'vcMonoFillArrowEdgeColor',
+ 'vcMonoFillArrowFillColor', 'vcMonoLineArrowColor',
+ 'vcMonoWindBarbColor', 'vcNoDataLabelOn', 'vcNoDataLabelString',
+ 'vcPositionMode', 'vcRefAnnoAngleF', 'vcRefAnnoArrowAngleF',
+ 'vcRefAnnoArrowEdgeColor', 'vcRefAnnoArrowFillColor',
+ 'vcRefAnnoArrowLineColor', 'vcRefAnnoArrowMinOffsetF',
+ 'vcRefAnnoArrowSpaceF', 'vcRefAnnoArrowUseVecColor',
+ 'vcRefAnnoBackgroundColor', 'vcRefAnnoConstantSpacingF',
+ 'vcRefAnnoExplicitMagnitudeF', 'vcRefAnnoFont',
+ 'vcRefAnnoFontAspectF', 'vcRefAnnoFontColor', 'vcRefAnnoFontHeightF',
+ 'vcRefAnnoFontQuality', 'vcRefAnnoFontThicknessF',
+ 'vcRefAnnoFuncCode', 'vcRefAnnoJust', 'vcRefAnnoOn',
+ 'vcRefAnnoOrientation', 'vcRefAnnoOrthogonalPosF',
+ 'vcRefAnnoParallelPosF', 'vcRefAnnoPerimColor', 'vcRefAnnoPerimOn',
+ 'vcRefAnnoPerimSpaceF', 'vcRefAnnoPerimThicknessF', 'vcRefAnnoSide',
+ 'vcRefAnnoString1', 'vcRefAnnoString1On', 'vcRefAnnoString2',
+ 'vcRefAnnoString2On', 'vcRefAnnoTextDirection', 'vcRefAnnoZone',
+ 'vcRefLengthF', 'vcRefMagnitudeF', 'vcScalarFieldData',
+ 'vcScalarMissingValColor', 'vcScalarValueFormat',
+ 'vcScalarValueScaleFactorF', 'vcScalarValueScaleValueF',
+ 'vcScalarValueScalingMode', 'vcSpanLevelPalette', 'vcUseRefAnnoRes',
+ 'vcUseScalarArray', 'vcVectorDrawOrder', 'vcVectorFieldData',
+ 'vcWindBarbCalmCircleSizeF', 'vcWindBarbColor',
+ 'vcWindBarbLineThicknessF', 'vcWindBarbScaleFactorF',
+ 'vcWindBarbTickAngleF', 'vcWindBarbTickLengthF',
+ 'vcWindBarbTickSpacingF', 'vcZeroFLabelAngleF',
+ 'vcZeroFLabelBackgroundColor', 'vcZeroFLabelConstantSpacingF',
+ 'vcZeroFLabelFont', 'vcZeroFLabelFontAspectF',
+ 'vcZeroFLabelFontColor', 'vcZeroFLabelFontHeightF',
+ 'vcZeroFLabelFontQuality', 'vcZeroFLabelFontThicknessF',
+ 'vcZeroFLabelFuncCode', 'vcZeroFLabelJust', 'vcZeroFLabelOn',
+ 'vcZeroFLabelOrthogonalPosF', 'vcZeroFLabelParallelPosF',
+ 'vcZeroFLabelPerimColor', 'vcZeroFLabelPerimOn',
+ 'vcZeroFLabelPerimSpaceF', 'vcZeroFLabelPerimThicknessF',
+ 'vcZeroFLabelSide', 'vcZeroFLabelString', 'vcZeroFLabelTextDirection',
+ 'vcZeroFLabelZone', 'vfCopyData', 'vfDataArray',
+ 'vfExchangeDimensions', 'vfExchangeUVData', 'vfMagMaxV', 'vfMagMinV',
+ 'vfMissingUValueV', 'vfMissingVValueV', 'vfPolarData',
+ 'vfSingleMissingValue', 'vfUDataArray', 'vfUMaxV', 'vfUMinV',
+ 'vfVDataArray', 'vfVMaxV', 'vfVMinV', 'vfXArray', 'vfXCActualEndF',
+ 'vfXCActualStartF', 'vfXCEndIndex', 'vfXCEndSubsetV', 'vfXCEndV',
+ 'vfXCStartIndex', 'vfXCStartSubsetV', 'vfXCStartV', 'vfXCStride',
+ 'vfYArray', 'vfYCActualEndF', 'vfYCActualStartF', 'vfYCEndIndex',
+ 'vfYCEndSubsetV', 'vfYCEndV', 'vfYCStartIndex', 'vfYCStartSubsetV',
+ 'vfYCStartV', 'vfYCStride', 'vpAnnoManagerId', 'vpClipOn',
+ 'vpHeightF', 'vpKeepAspect', 'vpOn', 'vpUseSegments', 'vpWidthF',
+ 'vpXF', 'vpYF', 'wkAntiAlias', 'wkBackgroundColor', 'wkBackgroundOpacityF',
+ 'wkColorMapLen', 'wkColorMap', 'wkColorModel', 'wkDashTableLength',
+ 'wkDefGraphicStyleId', 'wkDeviceLowerX', 'wkDeviceLowerY',
+ 'wkDeviceUpperX', 'wkDeviceUpperY', 'wkFileName', 'wkFillTableLength',
+ 'wkForegroundColor', 'wkFormat', 'wkFullBackground', 'wkGksWorkId',
+ 'wkHeight', 'wkMarkerTableLength', 'wkMetaName', 'wkOrientation',
+ 'wkPDFFileName', 'wkPDFFormat', 'wkPDFResolution', 'wkPSFileName',
+ 'wkPSFormat', 'wkPSResolution', 'wkPaperHeightF', 'wkPaperSize',
+ 'wkPaperWidthF', 'wkPause', 'wkTopLevelViews', 'wkViews',
+ 'wkVisualType', 'wkWidth', 'wkWindowId', 'wkXColorMode', 'wsCurrentSize',
+ 'wsMaximumSize', 'wsThresholdSize', 'xyComputeXMax',
+ 'xyComputeXMin', 'xyComputeYMax', 'xyComputeYMin', 'xyCoordData',
+ 'xyCoordDataSpec', 'xyCurveDrawOrder', 'xyDashPattern',
+ 'xyDashPatterns', 'xyExplicitLabels', 'xyExplicitLegendLabels',
+ 'xyLabelMode', 'xyLineColor', 'xyLineColors', 'xyLineDashSegLenF',
+ 'xyLineLabelConstantSpacingF', 'xyLineLabelFont',
+ 'xyLineLabelFontAspectF', 'xyLineLabelFontColor',
+ 'xyLineLabelFontColors', 'xyLineLabelFontHeightF',
+ 'xyLineLabelFontQuality', 'xyLineLabelFontThicknessF',
+ 'xyLineLabelFuncCode', 'xyLineThicknessF', 'xyLineThicknesses',
+ 'xyMarkLineMode', 'xyMarkLineModes', 'xyMarker', 'xyMarkerColor',
+ 'xyMarkerColors', 'xyMarkerSizeF', 'xyMarkerSizes',
+ 'xyMarkerThicknessF', 'xyMarkerThicknesses', 'xyMarkers',
+ 'xyMonoDashPattern', 'xyMonoLineColor', 'xyMonoLineLabelFontColor',
+ 'xyMonoLineThickness', 'xyMonoMarkLineMode', 'xyMonoMarker',
+ 'xyMonoMarkerColor', 'xyMonoMarkerSize', 'xyMonoMarkerThickness',
+ 'xyXIrrTensionF', 'xyXIrregularPoints', 'xyXStyle', 'xyYIrrTensionF',
+ 'xyYIrregularPoints', 'xyYStyle'), prefix=r'\b'),
+ Name.Builtin),
+
+ # Booleans
+ (r'\.(True|False)\.', Name.Builtin),
+ # Comparing Operators
+ (r'\.(eq|ne|lt|le|gt|ge|not|and|or|xor)\.', Operator.Word),
+ ],
+
+ 'strings': [
+ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
+ ],
+
+ 'nums': [
+ (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
+ (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+ (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+ ],
+ }
diff --git a/pygments/lexers/nimrod.py b/pygments/lexers/nimrod.py
new file mode 100644
index 0000000..bf5c948
--- /dev/null
+++ b/pygments/lexers/nimrod.py
@@ -0,0 +1,200 @@
+"""
+ pygments.lexers.nimrod
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Nim language (formerly known as Nimrod).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, default, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error
+
+__all__ = ['NimrodLexer']
+
+
class NimrodLexer(RegexLexer):
    """
    For Nim source code.

    .. versionadded:: 1.5
    """

    name = 'Nimrod'
    url = 'http://nim-lang.org/'
    aliases = ['nimrod', 'nim']
    filenames = ['*.nim', '*.nimrod']
    mimetypes = ['text/x-nim']

    # IGNORECASE because Nim identifiers are case-insensitive after the
    # first character; the optional-underscore handling is done by
    # underscorize() below.
    flags = re.MULTILINE | re.IGNORECASE

    def underscorize(words):
        # Build a single regex alternation matching each word with an
        # optional underscore after every character, to model Nim's
        # "style insensitivity" (e.g. "notin" == "not_in").
        # NOTE: invoked at class-creation time, so there is no `self`.
        newWords = []
        new = []
        for word in words:
            for ch in word:
                new.append(ch)
                new.append("_?")
            newWords.append(''.join(new))
            new = []
        return "|".join(newWords)

    keywords = [
        'addr', 'and', 'as', 'asm', 'bind', 'block', 'break', 'case',
        'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard',
        'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except',
        'export', 'finally', 'for', 'if', 'in', 'yield', 'interface',
        'is', 'isnot', 'iterator', 'let', 'mixin', 'mod',
        'not', 'notin', 'object', 'of', 'or', 'out', 'ptr', 'raise',
        'ref', 'return', 'shl', 'shr', 'static', 'try',
        'tuple', 'type', 'using', 'when', 'while', 'xor'
    ]

    # Keywords that behave like literals/constants.
    keywordsPseudo = [
        'nil', 'true', 'false'
    ]

    # Word-shaped operators (highlighted as Operator.Word, not Keyword).
    opWords = [
        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
        'notin', 'is', 'isnot'
    ]

    types = [
        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
    ]

    tokens = {
        'root': [
            # Comments (doc comments ##[...]/##... get String.Doc)
            (r'##\[', String.Doc, 'doccomment'),
            (r'##.*$', String.Doc),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'#.*$', Comment),

            # Pragmas: {. ... .}
            (r'\{\.', String.Other, 'pragma'),

            # Operators
            (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
            (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
             Punctuation),

            # Case statement branch ("of" at the start of a line)
            (r'(\n\s*)(of)(\s)', bygroups(Text.Whitespace, Keyword,
                                          Text.Whitespace), 'casebranch'),

            # Strings: raw (prefix"..."), triple-quoted, then plain
            (r'(?:[\w]+)"', String, 'rdqs'),
            (r'"""', String.Double, 'tdqs'),
            ('"', String, 'dqs'),

            # Char
            ("'", String.Char, 'chars'),

            # Keywords (all run through underscorize, see above)
            (r'(%s)\b' % underscorize(opWords), Operator.Word),
            (r'(proc|func|method|macro|template)(\s)(?![(\[\]])',
             bygroups(Keyword, Text.Whitespace), 'funcname'),
            (r'(%s)\b' % underscorize(keywords), Keyword),
            (r'(%s)\b' % underscorize(['from', 'import', 'include', 'export']),
             Keyword.Namespace),
            (r'(v_?a_?r)\b', Keyword.Declaration),
            (r'(%s)\b' % underscorize(types), Name.Builtin),
            (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),

            # Identifiers (no leading digit/underscore, no double underscores)
            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),

            # Numbers: floats first (lookahead for '.', 'e' or a 'f suffix),
            # then the prefixed integer bases, then plain integers.
            (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
             Number.Float, ('float-suffix', 'float-number')),
            (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
            (r'0b[01][01_]*', Number.Bin, 'int-suffix'),
            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),

            # Whitespace
            (r'\s+', Text.Whitespace),
            (r'.+$', Error),
        ],
        'chars': [
            (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
            (r"'", String.Char, '#pop'),
            (r".", String.Char)
        ],
        # Shared interior rules for all string states below.
        'strings': [
            (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
            (r'[^\\\'"$\n]+', String),
            # quotes, dollars and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
            # newlines are an error (use "nl" state)
        ],
        'doccomment': [
            (r'[^\]#]+', String.Doc),
            (r'##\[', String.Doc, '#push'),  # nested doc comments
            (r'\]##', String.Doc, '#pop'),
            (r'[\]#]', String.Doc),
        ],
        'comment': [
            (r'[^\]#]+', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),  # nested comments
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline),
        ],
        # Double-quoted string: escapes are recognized.
        'dqs': [
            (r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
             String.Escape),
            (r'"', String, '#pop'),
            include('strings')
        ],
        # Raw double-quoted string: "" is the only escape (a literal quote).
        'rdqs': [
            (r'"(?!")', String, '#pop'),
            (r'""', String.Escape),
            include('strings')
        ],
        # Triple-quoted string: may span lines.
        'tdqs': [
            (r'"""', String.Double, '#pop'),
            include('strings'),
            (r'\n', String.Double)
        ],
        'funcname': [
            (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
            (r'`.+`', Name.Function, '#pop')  # backtick-quoted name
        ],
        'nl': [
            (r'\n', String)
        ],
        'float-number': [
            (r'\.(?!\.)[0-9_]*[f]*', Number.Float),
            (r'e[+-]?[0-9][0-9_]*', Number.Float),
            default('#pop')
        ],
        'float-suffix': [
            (r'\'f(32|64)', Number.Float),
            default('#pop')
        ],
        'int-suffix': [
            (r'\'i(32|64)', Number.Integer.Long),
            (r'\'i(8|16)', Number.Integer),
            default('#pop')
        ],
        'casebranch': [
            (r',', Punctuation),
            (r'[\n ]+', Text.Whitespace),
            (r':', Operator, '#pop'),
            (r'\w+|[^:]', Name.Label),
        ],
        'pragma': [
            (r'[:,]', Text),
            (r'[\n ]+', Text.Whitespace),
            (r'\.\}', String.Other, '#pop'),
            (r'\w+|\W+|[^.}]', String.Other),
        ],
    }
diff --git a/pygments/lexers/nit.py b/pygments/lexers/nit.py
new file mode 100644
index 0000000..5c97574
--- /dev/null
+++ b/pygments/lexers/nit.py
@@ -0,0 +1,64 @@
+"""
+ pygments.lexers.nit
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Nit language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['NitLexer']
+
+
class NitLexer(RegexLexer):
    """
    For nit source.

    .. versionadded:: 2.0
    """

    name = 'Nit'
    url = 'http://nitlanguage.org'
    aliases = ['nit']
    filenames = ['*.nit']
    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            (words((
                'package', 'module', 'import', 'class', 'abstract', 'interface',
                'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef',
                'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern',
                'public', 'protected', 'private', 'intrude', 'if', 'then',
                'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not',
                'implies', 'return', 'continue', 'break', 'abort', 'assert',
                'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable',
                'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'),
             Keyword),
            (r'[A-Z]\w*', Name.Class),
            (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String),  # Simple long string
            (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|'
             r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String),  # Simple long string alt
            (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String),  # Start long string
            (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String),  # Mid long string
            (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String),  # End long string
            (r'"(\\.|([^"}{\\]))*"', String),  # Simple String
            (r'"(\\.|([^"}{\\]))*\{', String),  # Start string
            (r'\}(\\.|([^"}{\\]))*\{', String),  # Mid String
            (r'\}(\\.|([^"}{\\]))*"', String),  # End String
            (r'(\'[^\'\\]\')|(\'\\.\')', String.Char),
            # Number literals, most specific first:
            # - hexadecimal must precede plain integers, otherwise the
            #   leading '0' of 0x... is consumed as an Integer;
            # - floats must precede plain integers and use an *escaped*
            #   dot -- the original r'[0-9]*.[0-9]+' matched any
            #   character (so identifiers like "x5" lexed as Float) and,
            #   being ordered after the integer rule, never tokenized a
            #   real float as one unit.
            (r'0(x|X)[0-9A-Fa-f]+', Number.Hex),
            (r'[0-9]*\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'[a-z]\w*', Name),
            (r'_\w+', Name.Variable.Instance),
            (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator),
            (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation),
            (r'`\{[^`]*`\}', Text),  # Extern blocks won't be Lexed by Nit
            (r'[\r\n\t ]+', Text),
        ],
    }
diff --git a/pygments/lexers/nix.py b/pygments/lexers/nix.py
new file mode 100644
index 0000000..55e2617
--- /dev/null
+++ b/pygments/lexers/nix.py
@@ -0,0 +1,135 @@
+"""
+ pygments.lexers.nix
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the NixOS Nix language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal
+
+__all__ = ['NixLexer']
+
+
class NixLexer(RegexLexer):
    """
    For the Nix language.

    .. versionadded:: 2.0
    """

    name = 'Nix'
    url = 'http://nixos.org/nix/'
    aliases = ['nixos', 'nix']
    filenames = ['*.nix']
    mimetypes = ['text/x-nix']

    keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
                'else', 'then', '...']
    builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
                'map', 'removeAttrs', 'throw', 'toString', 'derivation']
    operators = ['++', '+', '?', '.', '!', '//', '==',
                 '!=', '&&', '||', '->', '=']

    punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]

    tokens = {
        'root': [
            # comments starting with #
            (r'#.*$', Comment.Single),

            # multiline comments
            (r'/\*', Comment.Multiline, 'comment'),

            # whitespace
            (r'\s+', Text),

            # keywords
            ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword),

            # highlight the builtins
            ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
             Name.Builtin),

            (r'\b(true|false|null)\b', Name.Constant),

            # operators
            ('(%s)' % '|'.join(re.escape(entry) for entry in operators),
             Operator),

            # word operators
            (r'\b(or|and)\b', Operator.Word),

            # punctuations
            ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation),

            # integers
            (r'[0-9]+', Number.Integer),

            # strings
            (r'"', String.Double, 'doublequote'),
            (r"''", String.Single, 'singlequote'),

            # paths
            (r'[\w.+-]*(\/[\w.+-]+)+', Literal),
            (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),

            # urls
            (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),

            # names of variables
            (r'[\w-]+\s*=', String.Symbol),
            (r'[a-zA-Z_][\w\'-]*', Text),

        ],
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'singlequote': [
            (r"'''", String.Escape),
            (r"''\$\{", String.Escape),
            (r"''\n", String.Escape),
            (r"''\r", String.Escape),
            (r"''\t", String.Escape),
            (r"''", String.Single, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r"['$]", String.Single),
            (r"[^'$]+", String.Single),
        ],
        'doublequote': [
            # Multi-character escape sequences must come before the bare
            # backslash rule; the original order let r'\\' consume the
            # backslash first, so \" and \${ were never matched as units
            # (and \" even terminated the string).  The '$' must also be
            # escaped: unescaped, '$' is an anchor, which made the
            # original r'\\$\{' pattern unmatchable.
            (r'\\\$\{', String.Escape),
            (r'\\"', String.Escape),
            (r'\\', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r'[^"]', String.Double),
        ],
        'antiquote': [
            (r"\}", String.Interpol, '#pop'),
            # TODO: we should probably escape also here ''${ \${
            (r"\$\{", String.Interpol, '#push'),
            include('root'),
        ],
    }

    def analyse_text(text):
        """Heuristic score for guessing Nix from file contents."""
        rv = 0.0
        # TODO: let/in
        if re.search(r'import.+?<[^>]+>', text):
            rv += 0.4
        if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
            rv += 0.4
        if re.search(r'=\s+mkIf\s+', text):
            rv += 0.4
        if re.search(r'\{[a-zA-Z,\s]+\}:', text):
            rv += 0.1
        return rv
diff --git a/pygments/lexers/oberon.py b/pygments/lexers/oberon.py
new file mode 100644
index 0000000..da7a248
--- /dev/null
+++ b/pygments/lexers/oberon.py
@@ -0,0 +1,120 @@
+"""
+ pygments.lexers.oberon
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Oberon family languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['ComponentPascalLexer']
+
+
class ComponentPascalLexer(RegexLexer):
    """
    For Component Pascal source code.

    .. versionadded:: 2.1
    """
    name = 'Component Pascal'
    aliases = ['componentpascal', 'cp']
    filenames = ['*.cp', '*.cps']
    mimetypes = ['text/x-component-pascal']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            include('punctuation'),
            include('numliterals'),
            include('strings'),
            include('operators'),
            include('builtins'),
            include('identifiers'),
        ],
        'whitespace': [
            (r'\n+', Text),  # blank lines
            (r'\s+', Text),  # whitespace
        ],
        'comments': [
            (r'\(\*([^$].*?)\*\)', Comment.Multiline),
            # TODO: nested comments (* (* ... *) ... (* ... *) *) not supported!
        ],
        'punctuation': [
            (r'[()\[\]{},.:;|]', Punctuation),
        ],
        'numliterals': [
            (r'[0-9A-F]+X\b', Number.Hex),                 # char code
            (r'[0-9A-F]+[HL]\b', Number.Hex),              # hexadecimal number
            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float),  # real number
            (r'[0-9]+\.[0-9]+', Number.Float),             # real number
            (r'[0-9]+', Number.Integer),                   # decimal whole number
        ],
        'strings': [
            (r"'[^\n']*'", String),  # single quoted string
            (r'"[^\n"]*"', String),  # double quoted string
        ],
        'operators': [
            # Arithmetic Operators
            (r'[+-]', Operator),
            (r'[*/]', Operator),
            # Relational Operators
            (r'[=#<>]', Operator),
            # Dereferencing Operator
            (r'\^', Operator),
            # Logical AND Operator
            (r'&', Operator),
            # Logical NOT Operator
            (r'~', Operator),
            # Assignment Symbol
            (r':=', Operator),
            # Range Constructor
            (r'\.\.', Operator),
            (r'\$', Operator),
        ],
        'identifiers': [
            (r'([a-zA-Z_$][\w$]*)', Name),
        ],
        'builtins': [
            (words((
                'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT',
                'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL'
            ), suffix=r'\b'), Keyword.Type),
            (words((
                'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY',
                'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE',
                'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR',
                'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED',
                'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF',
                'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
                'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL',
                'VAR', 'WHILE', 'WITH'
            ), suffix=r'\b'), Keyword.Reserved),
            (r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant),
        ]
    }

    def analyse_text(text):
        """The only other lexer using .cp is the C++ one, so we check if for
        a few common Pascal keywords here. Those are unfortunately quite
        common across various business languages as well."""
        result = 0
        if 'BEGIN' in text:
            result += 0.01
        if 'END' in text:
            result += 0.01
        if 'PROCEDURE' in text:
            result += 0.01
        # Fixed: the original tested 'END' a second time here, silently
        # scoring any END-containing text twice.  Probe the distinctive
        # module terminator 'END.' instead.
        if 'END.' in text:
            result += 0.01

        return result
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
new file mode 100644
index 0000000..c9c4278
--- /dev/null
+++ b/pygments/lexers/objective.py
@@ -0,0 +1,505 @@
+"""
+ pygments.lexers.objective
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Objective-C family languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \
+ inherit, default
+from pygments.token import Text, Keyword, Name, String, Operator, \
+ Number, Punctuation, Literal, Comment
+
+from pygments.lexers.c_cpp import CLexer, CppLexer
+
+__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer']
+
+
def objective(baselexer):
    """
    Generate a subclass of baselexer that accepts the Objective-C syntax
    extensions.

    ``baselexer`` is expected to be a C-family RegexLexer providing the
    'statements', 'whitespace' and 'root' states that the generated
    class extends via ``inherit``.
    """

    # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
    # since that's quite common in ordinary C/C++ files.  It's OK to match
    # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
    #
    # The upshot of this is that we CANNOT match @class or @interface
    _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')

    # Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
    # (note the identifier is *optional* when there is a ':'!)
    _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+'
                             r'(?:[a-zA-Z_]\w*\s*\]|'
                             r'(?:[a-zA-Z_]\w*)?:)')

    class GeneratedObjectiveCVariant(baselexer):
        """
        Implements Objective-C syntax on top of an existing C family lexer.
        """

        tokens = {
            'statements': [
                # @-prefixed literals: strings, booleans, chars, numbers,
                # boxed expressions, array and dictionary literals.
                (r'@"', String, 'string'),
                (r'@(YES|NO)', Number),
                (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
                (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
                (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
                (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
                (r'@0[0-7]+[Ll]?', Number.Oct),
                (r'@\d+[Ll]?', Number.Integer),
                (r'@\(', Literal, 'literal_number'),
                (r'@\[', Literal, 'literal_array'),
                (r'@\{', Literal, 'literal_dictionary'),
                (words((
                    '@selector', '@private', '@protected', '@public', '@encode',
                    '@synchronized', '@try', '@throw', '@catch', '@finally',
                    '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer',
                    '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong',
                    'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic',
                    'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in',
                    'out', 'inout', 'release', 'class', '@dynamic', '@optional',
                    '@required', '@autoreleasepool', '@import'), suffix=r'\b'),
                 Keyword),
                (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL',
                        'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'),
                 Keyword.Type),
                (r'@(true|false|YES|NO)\n', Name.Builtin),
                (r'(YES|NO|nil|self|super)\b', Name.Builtin),
                # Carbon types
                (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type),
                # Carbon built-ins
                (r'(TRUE|FALSE)\b', Name.Builtin),
                (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_classname')),
                (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_forward_classname')),
                # @ can also prefix other expressions like @{...} or @(...)
                (r'@', Punctuation),
                inherit,
            ],
            # Class name after @interface/@implementation, with optional
            # superclass, category, and opening ivar block.
            'oc_classname': [
                # interface definition that inherits
                (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)',
                 bygroups(Name.Class, Text, Name.Class, Text, Punctuation),
                 ('#pop', 'oc_ivars')),
                (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
                 bygroups(Name.Class, Text, Name.Class), '#pop'),
                # interface definition for a category
                (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)',
                 bygroups(Name.Class, Text, Name.Label, Text, Punctuation),
                 ('#pop', 'oc_ivars')),
                (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
                 bygroups(Name.Class, Text, Name.Label), '#pop'),
                # simple interface / implementation
                (r'([a-zA-Z$_][\w$]*)(\s*)(\{)',
                 bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),
                (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
            ],
            # Comma-separated names after @class/@protocol forward decls.
            'oc_forward_classname': [
                (r'([a-zA-Z$_][\w$]*)(\s*,\s*)',
                 bygroups(Name.Class, Text), 'oc_forward_classname'),
                (r'([a-zA-Z$_][\w$]*)(\s*;?)',
                 bygroups(Name.Class, Text), '#pop')
            ],
            # Instance-variable block body; braces may nest.
            'oc_ivars': [
                include('whitespace'),
                include('statements'),
                (';', Punctuation),
                (r'\{', Punctuation, '#push'),
                (r'\}', Punctuation, '#pop'),
            ],
            'root': [
                # methods
                (r'^([-+])(\s*)'                # method marker
                 r'(\(.*?\))?(\s*)'             # return type
                 r'([a-zA-Z$_][\w$]*:?)',       # begin of method name
                 bygroups(Punctuation, Text, using(this),
                          Text, Name.Function),
                 'method'),
                inherit,
            ],
            # Remainder of a method declaration: typed, labelled params.
            'method': [
                include('whitespace'),
                # TODO unsure if ellipses are allowed elsewhere, see
                # discussion in Issue 789
                (r',', Punctuation),
                (r'\.\.\.', Punctuation),
                (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)',
                 bygroups(using(this), Text, Name.Variable)),
                (r'[a-zA-Z$_][\w$]*:', Name.Function),
                (';', Punctuation, '#pop'),
                (r'\{', Punctuation, 'function'),
                default('#pop'),
            ],
            # @(...), @[...], @{...} literal bodies; inner brackets nest
            # via the *_inner states.
            'literal_number': [
                (r'\(', Punctuation, 'literal_number_inner'),
                (r'\)', Literal, '#pop'),
                include('statement'),
            ],
            'literal_number_inner': [
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop'),
                include('statement'),
            ],
            'literal_array': [
                (r'\[', Punctuation, 'literal_array_inner'),
                (r'\]', Literal, '#pop'),
                include('statement'),
            ],
            'literal_array_inner': [
                (r'\[', Punctuation, '#push'),
                (r'\]', Punctuation, '#pop'),
                include('statement'),
            ],
            'literal_dictionary': [
                (r'\}', Literal, '#pop'),
                include('statement'),
            ],
        }

        def analyse_text(text):
            # Scores are ordered by how unambiguous the marker is.
            if _oc_keywords.search(text):
                return 1.0
            elif '@"' in text:  # strings
                return 0.8
            elif re.search('@[0-9]+', text):
                return 0.7
            elif _oc_message.search(text):
                return 0.8
            return 0

        def get_tokens_unprocessed(self, text, stack=('root',)):
            # Re-tag plain names that are known Cocoa interfaces,
            # protocols or primitives as builtins.
            from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
                COCOA_PROTOCOLS, COCOA_PRIMITIVES

            for index, token, value in \
                    baselexer.get_tokens_unprocessed(self, text, stack):
                if token is Name or token is Name.Class:
                    if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                            or value in COCOA_PRIMITIVES:
                        token = Name.Builtin.Pseudo

                yield index, token, value

    return GeneratedObjectiveCVariant
+
+
class ObjectiveCLexer(objective(CLexer)):
    """Lexer for Objective-C source code with preprocessor directives."""

    name = 'Objective-C'
    aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
    filenames = ['*.m', '*.h']
    mimetypes = ['text/x-objective-c']
    url = 'https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ProgrammingWithObjectiveC/Introduction/Introduction.html'
    # Keep below the plain C lexer so ambiguous *.h files default to C.
    priority = 0.05
+
+
class ObjectiveCppLexer(objective(CppLexer)):
    """Lexer for Objective-C++ source code with preprocessor directives."""

    name = 'Objective-C++'
    aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++']
    filenames = ['*.mm', '*.hh']
    mimetypes = ['text/x-objective-c++']
    # Keep below the plain C++ lexer so ambiguous *.hh files default to C++.
    priority = 0.05
+
+
class LogosLexer(ObjectiveCppLexer):
    """
    For Logos + Objective-C source code with preprocessor directives.

    .. versionadded:: 1.6
    """

    name = 'Logos'
    aliases = ['logos']
    filenames = ['*.x', '*.xi', '*.xm', '*.xmi']
    mimetypes = ['text/x-logos']
    priority = 0.25

    tokens = {
        'statements': [
            # Logos %-directives usable inside statements.
            (r'(%orig|%log)\b', Keyword),
            (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))',
             bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)),
            (r'(%init)\b(\()',
             bygroups(Keyword, Punctuation), 'logos_init_directive'),
            (r'(%init)(?=\s*;)', bygroups(Keyword)),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
             bygroups(Keyword, Text, Name.Class), '#pop'),
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             ('#pop', 'logos_classname')),
            inherit,
        ],
        # Argument list of %init(...): "name = value" pairs or bare names.
        'logos_init_directive': [
            (r'\s+', Text),
            (',', Punctuation, ('logos_init_directive', '#pop')),
            (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)',
             bygroups(Name.Class, Text, Punctuation, Text, Text)),
            (r'([a-zA-Z$_][\w$]*)', Name.Class),
            (r'\)', Punctuation, '#pop'),
        ],
        # Class name after %subclass, with optional superclass.
        'logos_classname': [
            (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
             bygroups(Name.Class, Text, Name.Class), '#pop'),
            (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
        ],
        'root': [
            # Top-level Logos directives.
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             'logos_classname'),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
             bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
            (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
             'function'),
            (r'(%new)(\s*)(\()(.*?)(\))',
             bygroups(Keyword, Text, Keyword, String, Keyword)),
            (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
            inherit,
        ],
    }

    # Directives distinctive enough to identify a Logos file.
    _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()')

    def analyse_text(text):
        if LogosLexer._logos_keywords.search(text):
            return 1.0
        return 0
+
+
class SwiftLexer(RegexLexer):
    """
    For Swift source.

    .. versionadded:: 2.0
    """
    name = 'Swift'
    url = 'https://www.swift.org/'
    filenames = ['*.swift']
    aliases = ['swift']
    mimetypes = ['text/x-swift']

    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'//', Comment.Single, 'comment-single'),
            (r'/\*', Comment.Multiline, 'comment-multi'),
            (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'),

            # Keywords
            include('keywords'),

            # Global Types
            (words((
                'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView',
                'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer',
                'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray',
                'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double',
                'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator',
                'EnumerateSequence', 'FilterCollectionView',
                'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView',
                'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf',
                'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer',
                'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator',
                'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection',
                'LazyForwardCollection', 'LazyRandomAccessCollection',
                'LazySequence', 'MapCollectionView', 'MapSequenceGenerator',
                'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap',
                'Optional', 'PermutationGenerator', 'QuickLookObject',
                'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat',
                'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf',
                'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator',
                'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32',
                'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult',
                'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer',
                'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer',
                'UnsafeMutablePointer', 'UnsafePointer', 'Zip2', 'ZipGenerator2',
                # Protocols
                'AbsoluteValuable', 'AnyObject', 'ArrayLiteralConvertible',
                'BidirectionalIndexType', 'BitwiseOperationsType',
                'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType',
                'CollectionType', 'Comparable', 'DebugPrintable',
                'DictionaryLiteralConvertible', 'Equatable',
                'ExtendedGraphemeClusterLiteralConvertible',
                'ExtensibleCollectionType', 'FloatLiteralConvertible',
                'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable',
                'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType',
                'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable',
                'NilLiteralConvertible', 'OutputStreamType', 'Printable',
                'RandomAccessIndexType', 'RangeReplaceableCollectionType',
                'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType',
                'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable',
                'Streamable', 'Strideable', 'StringInterpolationConvertible',
                'StringLiteralConvertible', 'UnicodeCodecType',
                'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType',
                '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType',
                '_CollectionType', '_Comparable', '_ExtensibleCollectionType',
                '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType',
                '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType',
                '_RawOptionSetType', '_SequenceType', '_Sequence_Type',
                '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable',
                '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType',
                '_SwiftNSCopyingType', '_SwiftNSDictionaryRequiredOverridesType',
                '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType',
                '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType',
                '_SwiftNSStringType', '_UnsignedIntegerType',
                # Variables
                'C_ARGC', 'C_ARGV', 'Process',
                # Typealiases
                'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16',
                'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort',
                'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort',
                'CWideChar', 'ExtendedGraphemeClusterType', 'Float32', 'Float64',
                'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType',
                'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word',
                # Foundation/Cocoa
                'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'),
             Name.Builtin),
            # Functions
            (words((
                'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure',
                'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln',
                'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal',
                'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices',
                'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare',
                'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps',
                'partition', 'precondition', 'preconditionFailure', 'prefix', 'print',
                'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast',
                'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted',
                'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue',
                'suffix', 'swap', 'toDebugString', 'toString', 'transcode',
                'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast',
                'withExtendedLifetime', 'withUnsafeMutablePointer',
                'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers',
                'withVaList'), suffix=r'\b'),
             Name.Builtin.Pseudo),

            # Implicit Block Variables ($0, $1, ... in closures)
            (r'\$\d+', Name.Variable),

            # Binary Literal
            (r'0b[01_]+', Number.Bin),
            # Octal Literal
            (r'0o[0-7_]+', Number.Oct),
            # Hexadecimal Literal
            (r'0x[0-9a-fA-F_]+', Number.Hex),
            # Decimal Literal
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float),
            (r'[0-9][0-9_]*', Number.Integer),
            # String Literal
            (r'"', String, 'string'),

            # Operators and Punctuation
            (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation),
            (r'[/=\-+!*%<>&|^?~]+', Operator),

            # Identifier
            (r'[a-zA-Z_]\w*', Name)
        ],
        'keywords': [
            (words((
                'as', 'async', 'await', 'break', 'case', 'catch', 'continue', 'default', 'defer',
                'do', 'else', 'fallthrough', 'for', 'guard', 'if', 'in', 'is',
                'repeat', 'return', '#selector', 'switch', 'throw', 'try',
                'where', 'while'), suffix=r'\b'),
             Keyword),
            (r'@availability\([^)]+\)', Keyword.Reserved),
            (words((
                'associativity', 'convenience', 'dynamic', 'didSet', 'final',
                'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating',
                'none', 'nonmutating', 'optional', 'override', 'postfix',
                'precedence', 'prefix', 'Protocol', 'required', 'rethrows',
                'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet',
                '@availability', '@autoclosure', '@noreturn',
                '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc',
                '@UIApplicationMain', '@IBAction', '@IBDesignable',
                '@IBInspectable', '@IBOutlet'), suffix=r'\b'),
             Keyword.Reserved),
            (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__'
             r'|__FILE__|__FUNCTION__|__LINE__|_'
             r'|#(?:file|line|column|function))\b', Keyword.Constant),
            (r'import\b', Keyword.Declaration, 'module'),
            (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)',
             bygroups(Keyword.Declaration, Text, Name.Class)),
            (r'(func)(\s+)([a-zA-Z_]\w*)',
             bygroups(Keyword.Declaration, Text, Name.Function)),
            (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration,
                                                       Text, Name.Variable)),
            (words((
                'actor', 'associatedtype', 'class', 'deinit', 'enum', 'extension', 'func', 'import',
                'init', 'internal', 'let', 'operator', 'private', 'protocol', 'public',
                'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'),
             Keyword.Declaration)
        ],
        # Markup highlighted inside both comment flavours below.
        'comment': [
            (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):',
             Comment.Special)
        ],

        # Nested
        'comment-single': [
            (r'\n', Text, '#pop'),
            include('comment'),
            (r'[^\n]', Comment.Single)
        ],
        'comment-multi': [
            include('comment'),
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),  # nested /* */ comments
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        # Module path after 'import', up to end of line.
        'module': [
            (r'\n', Text, '#pop'),
            (r'[a-zA-Z_]\w*', Name.Class),
            include('root')
        ],
        # Rest of a #if/#available line.
        'preproc': [
            (r'\n', Text, '#pop'),
            include('keywords'),
            (r'[A-Za-z]\w*', Comment.Preproc),
            include('root')
        ],
        'string': [
            (r'\\\(', String.Interpol, 'string-intp'),  # \(...) interpolation
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String)
        ],
        # Inside \( ... ); parentheses may nest.
        'string-intp': [
            (r'\(', String.Interpol, '#push'),
            (r'\)', String.Interpol, '#pop'),
            include('root')
        ]
    }

    def get_tokens_unprocessed(self, text):
        # Re-tag plain names that are known Cocoa interfaces, protocols
        # or primitives as builtins.
        from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
            COCOA_PROTOCOLS, COCOA_PRIMITIVES

        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name or token is Name.Class:
                if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                        or value in COCOA_PRIMITIVES:
                    token = Name.Builtin.Pseudo

            yield index, token, value
diff --git a/pygments/lexers/ooc.py b/pygments/lexers/ooc.py
new file mode 100644
index 0000000..5b1df2c
--- /dev/null
+++ b/pygments/lexers/ooc.py
@@ -0,0 +1,85 @@
+"""
+ pygments.lexers.ooc
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Ooc language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['OocLexer']
+
+
+class OocLexer(RegexLexer):
+ """
+ For Ooc source code.
+
+ .. versionadded:: 1.2
+ """
+ name = 'Ooc'
+ url = 'http://ooc-lang.org/'
+ aliases = ['ooc']
+ filenames = ['*.ooc']
+ mimetypes = ['text/x-ooc']
+
+ tokens = {
+ 'root': [
+ (words((
+ 'class', 'interface', 'implement', 'abstract', 'extends', 'from',
+ 'this', 'super', 'new', 'const', 'final', 'static', 'import',
+ 'use', 'extern', 'inline', 'proto', 'break', 'continue',
+ 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do',
+ 'switch', 'case', 'as', 'in', 'version', 'return', 'true',
+ 'false', 'null'), prefix=r'\b', suffix=r'\b'),
+ Keyword),
+ (r'include\b', Keyword, 'include'),
+ (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
+ bygroups(Keyword, Text, Keyword, Text, Name.Class)),
+ (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
+ bygroups(Keyword, Text, Name.Function)),
+ (r'\bfunc\b', Keyword),
+ # Note: %= and ^= not listed on http://ooc-lang.org/syntax
+ (r'//.*', Comment),
+ (r'(?s)/\*.*?\*/', Comment.Multiline),
+ (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
+ r'&&?|\|\|?|\^=?)', Operator),
+ (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
+ Name.Function)),
+ (r'[A-Z][A-Z0-9_]+', Name.Constant),
+ (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),
+
+ (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
+ bygroups(Name.Function, Text)),
+ (r'[a-z]\w*', Name.Variable),
+
+ # : introduces types
+ (r'[:(){}\[\];,]', Punctuation),
+
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'0c[0-9]+', Number.Oct),
+ (r'0b[01]+', Number.Bin),
+ (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
+ (r'[0-9_]+', Number.Decimal),
+
+ (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"',
+ String.Double),
+ (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
+ String.Char),
+ (r'@', Punctuation), # pointer dereference
+ (r'\.', Punctuation), # imports or chain operator
+
+ (r'\\[ \t\n]', Text),
+ (r'[ \t]+', Text),
+ ],
+ 'include': [
+ (r'[\w/]+', Name),
+ (r',', Punctuation),
+ (r'[ \t]', Text),
+ (r'[;\n]', Text, '#pop'),
+ ],
+ }
diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py
new file mode 100644
index 0000000..6c73c01
--- /dev/null
+++ b/pygments/lexers/other.py
@@ -0,0 +1,40 @@
+"""
+ pygments.lexers.other
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Just export lexer classes previously contained in this module.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
+from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
+ TcshLexer
+from pygments.lexers.robotframework import RobotFrameworkLexer
+from pygments.lexers.testing import GherkinLexer
+from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
+from pygments.lexers.prolog import LogtalkLexer
+from pygments.lexers.snobol import SnobolLexer
+from pygments.lexers.rebol import RebolLexer
+from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
+from pygments.lexers.modeling import ModelicaLexer
+from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
+ HybrisLexer
+from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
+ AsymptoteLexer, PovrayLexer
+from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
+ GoodDataCLLexer, MaqlLexer
+from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
+from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
+ MscgenLexer, VGLLexer
+from pygments.lexers.basic import CbmBasicV2Lexer
+from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
+from pygments.lexers.ecl import ECLLexer
+from pygments.lexers.urbi import UrbiscriptLexer
+from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
+from pygments.lexers.installers import NSISLexer, RPMSpecLexer
+from pygments.lexers.textedit import AwkLexer
+from pygments.lexers.smv import NuSMVLexer
+
+__all__ = []
diff --git a/pygments/lexers/parasail.py b/pygments/lexers/parasail.py
new file mode 100644
index 0000000..de50c65
--- /dev/null
+++ b/pygments/lexers/parasail.py
@@ -0,0 +1,79 @@
+"""
+ pygments.lexers.parasail
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for ParaSail.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal
+
+__all__ = ['ParaSailLexer']
+
+
+class ParaSailLexer(RegexLexer):
+ """
+ For ParaSail source code.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'ParaSail'
+ url = 'http://www.parasail-lang.org'
+ aliases = ['parasail']
+ filenames = ['*.psi', '*.psl']
+ mimetypes = ['text/x-parasail']
+
+ flags = re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r'[^\S\n]+', Text),
+ (r'//.*?\n', Comment.Single),
+ (r'\b(and|or|xor)=', Operator.Word),
+ (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|'
+ r'(is|not)\s+null)\b',
+ Operator.Word),
+ # Keywords
+ (r'\b(abs|abstract|all|block|class|concurrent|const|continue|'
+ r'each|end|exit|extends|exports|forward|func|global|implements|'
+ r'import|in|interface|is|lambda|locked|new|not|null|of|op|'
+ r'optional|private|queued|ref|return|reverse|separate|some|'
+ r'type|until|var|with|'
+ # Control flow
+ r'if|then|else|elsif|case|for|while|loop)\b',
+ Keyword.Reserved),
+ (r'(abstract\s+)?(interface|class|op|func|type)',
+ Keyword.Declaration),
+ # Literals
+ (r'"[^"]*"', String),
+ (r'\\[\'ntrf"0]', String.Escape),
+ (r'#[a-zA-Z]\w*', Literal), # Enumeration
+ include('numbers'),
+ (r"'[^']'", String.Char),
+ (r'[a-zA-Z]\w*', Name),
+ # Operators and Punctuation
+ (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
+ r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|'
+ r'\.\.|<\.\.|\.\.<|<\.\.<)',
+ Operator),
+ (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)',
+ Punctuation),
+ (r'\n+', Text),
+ ],
+ 'numbers': [
+ (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base
+ (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex
+ (r'0[bB][01][01_]*', Number.Bin), # C-like bin
+ (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp
+ Number.Float),
+ (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float
+ (r'\d[0-9_]*', Number.Integer), # integer
+ ],
+ }
diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py
new file mode 100644
index 0000000..20a0cc3
--- /dev/null
+++ b/pygments/lexers/parsers.py
@@ -0,0 +1,801 @@
+"""
+ pygments.lexers.parsers
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for parser generators.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, DelegatingLexer, \
+ include, bygroups, using
+from pygments.token import Punctuation, Other, Text, Comment, Operator, \
+ Keyword, Name, String, Number, Whitespace
+from pygments.lexers.jvm import JavaLexer
+from pygments.lexers.c_cpp import CLexer, CppLexer
+from pygments.lexers.objective import ObjectiveCLexer
+from pygments.lexers.d import DLexer
+from pygments.lexers.dotnet import CSharpLexer
+from pygments.lexers.ruby import RubyLexer
+from pygments.lexers.python import PythonLexer
+from pygments.lexers.perl import PerlLexer
+
+__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
+ 'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
+ 'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
+ 'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
+ 'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
+ 'AntlrJavaLexer', 'AntlrActionScriptLexer',
+ 'TreetopLexer', 'EbnfLexer']
+
+
+class RagelLexer(RegexLexer):
+ """A pure `Ragel <www.colm.net/open-source/ragel>`_ lexer. Use this
+ for fragments of Ragel. For ``.rl`` files, use
+ :class:`RagelEmbeddedLexer` instead (or one of the
+ language-specific subclasses).
+
+ .. versionadded:: 1.1
+
+ """
+
+ name = 'Ragel'
+ url = 'http://www.colm.net/open-source/ragel/'
+ aliases = ['ragel']
+ filenames = []
+
+ tokens = {
+ 'whitespace': [
+ (r'\s+', Whitespace)
+ ],
+ 'comments': [
+ (r'\#.*$', Comment),
+ ],
+ 'keywords': [
+ (r'(access|action|alphtype)\b', Keyword),
+ (r'(getkey|write|machine|include)\b', Keyword),
+ (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
+ (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
+ ],
+ 'numbers': [
+ (r'0x[0-9A-Fa-f]+', Number.Hex),
+ (r'[+-]?[0-9]+', Number.Integer),
+ ],
+ 'literals': [
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'\[(\\\\|\\[^\\]|[^\\\]])*\]', String), # square bracket literals
+ (r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/', String.Regex), # regular expressions
+ ],
+ 'identifiers': [
+ (r'[a-zA-Z_]\w*', Name.Variable),
+ ],
+ 'operators': [
+ (r',', Operator), # Join
+ (r'\||&|--?', Operator), # Union, Intersection and Subtraction
+ (r'\.|<:|:>>?', Operator), # Concatenation
+ (r':', Operator), # Label
+ (r'->', Operator), # Epsilon Transition
+ (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
+ (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
+ (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
+ (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
+ (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
+ (r'>|@|\$|%', Operator), # Transition Actions and Priorities
+ (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition
+ (r'!|\^', Operator), # Negation
+ (r'\(|\)', Operator), # Grouping
+ ],
+ 'root': [
+ include('literals'),
+ include('whitespace'),
+ include('comments'),
+ include('keywords'),
+ include('numbers'),
+ include('identifiers'),
+ include('operators'),
+ (r'\{', Punctuation, 'host'),
+ (r'=', Operator),
+ (r';', Punctuation),
+ ],
+ 'host': [
+ (r'(' + r'|'.join(( # keep host code in largest possible chunks
+ r'[^{}\'"/#]+', # exclude unsafe characters
+ r'[^\\]\\[{}]', # allow escaped { or }
+
+ # strings and comments may safely contain unsafe characters
+ r'"(\\\\|\\[^\\]|[^"\\])*"',
+ r"'(\\\\|\\[^\\]|[^'\\])*'",
+ r'//.*$\n?', # single line comment
+ r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
+ r'\#.*$\n?', # ruby comment
+
+ # regular expression: There's no reason for it to start
+ # with a * and this stops confusion with comments.
+ r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+ # / is safe now that we've handled regex and javadoc comments
+ r'/',
+ )) + r')+', Other),
+
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ }
+
+
+class RagelEmbeddedLexer(RegexLexer):
+ """
+ A lexer for Ragel embedded in a host language file.
+
+ This will only highlight Ragel statements. If you want host language
+ highlighting then call the language-specific Ragel lexer.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Embedded Ragel'
+ aliases = ['ragel-em']
+ filenames = ['*.rl']
+
+ tokens = {
+ 'root': [
+ (r'(' + r'|'.join(( # keep host code in largest possible chunks
+ r'[^%\'"/#]+', # exclude unsafe characters
+ r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them
+
+ # strings and comments may safely contain unsafe characters
+ r'"(\\\\|\\[^\\]|[^"\\])*"',
+ r"'(\\\\|\\[^\\]|[^'\\])*'",
+ r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
+ r'//.*$\n?', # single line comment
+ r'\#.*$\n?', # ruby/ragel comment
+ r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/', # regular expression
+
+ # / is safe now that we've handled regex and javadoc comments
+ r'/',
+ )) + r')+', Other),
+
+ # Single Line FSM.
+ # Please don't put a quoted newline in a single line FSM.
+ # That's just mean. It will break this.
+ (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
+ using(RagelLexer),
+ Punctuation, Text)),
+
+ # Multi Line FSM.
+ (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'),
+ ],
+ 'multi-line-fsm': [
+ (r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
+ r'(' + r'|'.join((
+ r'[^}\'"\[/#]', # exclude unsafe characters
+ r'\}(?=[^%]|$)', # } is okay as long as it's not followed by %
+ r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
+ r'[^\\]\\[{}]', # ...and } is okay if it's escaped
+
+ # allow / if it's preceded with one of these symbols
+ # (ragel EOF actions)
+ r'(>|\$|%|<|@|<>)/',
+
+ # specifically allow regex followed immediately by *
+ # so it doesn't get mistaken for a comment
+ r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/\*',
+
+ # allow / as long as it's not followed by another / or by a *
+ r'/(?=[^/*]|$)',
+
+ # We want to match as many of these as we can in one block.
+ # Not sure if we need the + sign here,
+ # does it help performance?
+ )) + r')+',
+
+ # strings and comments may safely contain unsafe characters
+ r'"(\\\\|\\[^\\]|[^"\\])*"',
+ r"'(\\\\|\\[^\\]|[^'\\])*'",
+ r"\[(\\\\|\\[^\\]|[^\]\\])*\]", # square bracket literal
+ r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
+ r'//.*$\n?', # single line comment
+ r'\#.*$\n?', # ruby/ragel comment
+ )) + r')+', using(RagelLexer)),
+
+ (r'\}%%', Punctuation, '#pop'),
+ ]
+ }
+
+ def analyse_text(text):
+ return '@LANG: indep' in text
+
+
+class RagelRubyLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in a Ruby host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in Ruby Host'
+ aliases = ['ragel-ruby', 'ragel-rb']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(RubyLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: ruby' in text
+
+
+class RagelCLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in a C host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in C Host'
+ aliases = ['ragel-c']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(CLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: c' in text
+
+
+class RagelDLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in a D host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in D Host'
+ aliases = ['ragel-d']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(DLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: d' in text
+
+
+class RagelCppLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in a C++ host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in CPP Host'
+ aliases = ['ragel-cpp']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(CppLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: c++' in text
+
+
+class RagelObjectiveCLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in an Objective C host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in Objective C Host'
+ aliases = ['ragel-objc']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(ObjectiveCLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: objc' in text
+
+
+class RagelJavaLexer(DelegatingLexer):
+ """
+ A lexer for Ragel in a Java host file.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Ragel in Java Host'
+ aliases = ['ragel-java']
+ filenames = ['*.rl']
+
+ def __init__(self, **options):
+ super().__init__(JavaLexer, RagelEmbeddedLexer, **options)
+
+ def analyse_text(text):
+ return '@LANG: java' in text
+
+
+class AntlrLexer(RegexLexer):
+ """
+ Generic `ANTLR`_ Lexer.
+ Should not be called directly, instead
+ use DelegatingLexer for your target language.
+
+ .. versionadded:: 1.1
+
+ .. _ANTLR: http://www.antlr.org/
+ """
+
+ name = 'ANTLR'
+ aliases = ['antlr']
+ filenames = []
+
+ _id = r'[A-Za-z]\w*'
+ _TOKEN_REF = r'[A-Z]\w*'
+ _RULE_REF = r'[a-z]\w*'
+ _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
+ _INT = r'[0-9]+'
+
+ tokens = {
+ 'whitespace': [
+ (r'\s+', Whitespace),
+ ],
+ 'comments': [
+ (r'//.*$', Comment),
+ (r'/\*(.|\n)*?\*/', Comment),
+ ],
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+
+ (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
+ Punctuation)),
+ # optionsSpec
+ (r'options\b', Keyword, 'options'),
+ # tokensSpec
+ (r'tokens\b', Keyword, 'tokens'),
+ # attrScope
+ (r'(scope)(\s*)(' + _id + r')(\s*)(\{)',
+ bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
+ Punctuation), 'action'),
+ # exception
+ (r'(catch|finally)\b', Keyword, 'exception'),
+ # action
+ (r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)',
+ bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
+ Name.Label, Whitespace, Punctuation), 'action'),
+ # rule
+ (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?',
+ bygroups(Keyword, Whitespace, Name.Label, Punctuation),
+ ('rule-alts', 'rule-prelims')),
+ ],
+ 'exception': [
+ (r'\n', Whitespace, '#pop'),
+ (r'\s', Whitespace),
+ include('comments'),
+
+ (r'\[', Punctuation, 'nested-arg-action'),
+ (r'\{', Punctuation, 'action'),
+ ],
+ 'rule-prelims': [
+ include('whitespace'),
+ include('comments'),
+
+ (r'returns\b', Keyword),
+ (r'\[', Punctuation, 'nested-arg-action'),
+ (r'\{', Punctuation, 'action'),
+ # throwsSpec
+ (r'(throws)(\s+)(' + _id + ')',
+ bygroups(Keyword, Whitespace, Name.Label)),
+ (r'(,)(\s*)(' + _id + ')',
+ bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
+ # optionsSpec
+ (r'options\b', Keyword, 'options'),
+ # ruleScopeSpec - scope followed by target language code or name of action
+ # TODO finish implementing other possibilities for scope
+ # L173 ANTLRv3.g from ANTLR book
+ (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation),
+ 'action'),
+ (r'(scope)(\s+)(' + _id + r')(\s*)(;)',
+ bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
+ # ruleAction
+ (r'(@' + _id + r')(\s*)(\{)',
+ bygroups(Name.Label, Whitespace, Punctuation), 'action'),
+ # finished prelims, go to rule alts!
+ (r':', Punctuation, '#pop')
+ ],
+ 'rule-alts': [
+ include('whitespace'),
+ include('comments'),
+
+ # These might need to go in a separate 'block' state triggered by (
+ (r'options\b', Keyword, 'options'),
+ (r':', Punctuation),
+
+ # literals
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'<<([^>]|>[^>])>>', String),
+ # identifiers
+ # Tokens start with capital letter.
+ (r'\$?[A-Z_]\w*', Name.Constant),
+ # Rules start with small letter.
+ (r'\$?[a-z_]\w*', Name.Variable),
+ # operators
+ (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
+ (r',', Punctuation),
+ (r'\[', Punctuation, 'nested-arg-action'),
+ (r'\{', Punctuation, 'action'),
+ (r';', Punctuation, '#pop')
+ ],
+ 'tokens': [
+ include('whitespace'),
+ include('comments'),
+ (r'\{', Punctuation),
+ (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ + r')?(\s*)(;)',
+ bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
+ String, Whitespace, Punctuation)),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'options': [
+ include('whitespace'),
+ include('comments'),
+ (r'\{', Punctuation),
+ (r'(' + _id + r')(\s*)(=)(\s*)(' +
+ '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)',
+ bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
+ Text, Whitespace, Punctuation)),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'action': [
+ (r'(' + r'|'.join(( # keep host code in largest possible chunks
+ r'[^${}\'"/\\]+', # exclude unsafe characters
+
+ # strings and comments may safely contain unsafe characters
+ r'"(\\\\|\\[^\\]|[^"\\])*"',
+ r"'(\\\\|\\[^\\]|[^'\\])*'",
+ r'//.*$\n?', # single line comment
+ r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
+
+ # regular expression: There's no reason for it to start
+ # with a * and this stops confusion with comments.
+ r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+ # backslashes are okay, as long as we are not backslashing a %
+ r'\\(?!%)',
+
+ # Now that we've handled regex and javadoc comments
+ # it's safe to let / through.
+ r'/',
+ )) + r')+', Other),
+ (r'(\\)(%)', bygroups(Punctuation, Other)),
+ (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
+ bygroups(Name.Variable, Punctuation, Name.Property)),
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'nested-arg-action': [
+ (r'(' + r'|'.join(( # keep host code in largest possible chunks.
+ r'[^$\[\]\'"/]+', # exclude unsafe characters
+
+ # strings and comments may safely contain unsafe characters
+ r'"(\\\\|\\[^\\]|[^"\\])*"',
+ r"'(\\\\|\\[^\\]|[^'\\])*'",
+ r'//.*$\n?', # single line comment
+ r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
+
+ # regular expression: There's no reason for it to start
+ # with a * and this stops confusion with comments.
+ r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+ # Now that we've handled regex and javadoc comments
+ # it's safe to let / through.
+ r'/',
+ )) + r')+', Other),
+
+
+ (r'\[', Punctuation, '#push'),
+ (r'\]', Punctuation, '#pop'),
+ (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
+ bygroups(Name.Variable, Punctuation, Name.Property)),
+ (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
+ ]
+ }
+
+ def analyse_text(text):
+ return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
+
+
+# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+
+class AntlrCppLexer(DelegatingLexer):
+ """
+ ANTLR with C++ Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With CPP Target'
+ aliases = ['antlr-cpp']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(CppLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
+
+
+class AntlrObjectiveCLexer(DelegatingLexer):
+ """
+ ANTLR with Objective-C Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With ObjectiveC Target'
+ aliases = ['antlr-objc']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(ObjectiveCLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
+
+
+class AntlrCSharpLexer(DelegatingLexer):
+ """
+ ANTLR with C# Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With C# Target'
+ aliases = ['antlr-csharp', 'antlr-c#']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(CSharpLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
+
+
+class AntlrPythonLexer(DelegatingLexer):
+ """
+ ANTLR with Python Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With Python Target'
+ aliases = ['antlr-python']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(PythonLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
+
+
+class AntlrJavaLexer(DelegatingLexer):
+ """
+ ANTLR with Java Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With Java Target'
+ aliases = ['antlr-java']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(JavaLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ # Antlr language is Java by default
+ return AntlrLexer.analyse_text(text) and 0.9
+
+
+class AntlrRubyLexer(DelegatingLexer):
+ """
+ ANTLR with Ruby Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With Ruby Target'
+ aliases = ['antlr-ruby', 'antlr-rb']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(RubyLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
+
+
+class AntlrPerlLexer(DelegatingLexer):
+ """
+ ANTLR with Perl Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With Perl Target'
+ aliases = ['antlr-perl']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ super().__init__(PerlLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
+
+
+class AntlrActionScriptLexer(DelegatingLexer):
+ """
+ ANTLR with ActionScript Target
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'ANTLR With ActionScript Target'
+ aliases = ['antlr-actionscript', 'antlr-as']
+ filenames = ['*.G', '*.g']
+
+ def __init__(self, **options):
+ from pygments.lexers.actionscript import ActionScriptLexer
+ super().__init__(ActionScriptLexer, AntlrLexer, **options)
+
+ def analyse_text(text):
+ return AntlrLexer.analyse_text(text) and \
+ re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
+
+
+class TreetopBaseLexer(RegexLexer):
+ """
+ A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
+ Not for direct use; use :class:`TreetopLexer` instead.
+
+ .. versionadded:: 1.6
+ """
+
+ tokens = {
+ 'root': [
+ include('space'),
+ (r'require[ \t]+[^\n\r]+[\n\r]', Other),
+ (r'module\b', Keyword.Namespace, 'module'),
+ (r'grammar\b', Keyword, 'grammar'),
+ ],
+ 'module': [
+ include('space'),
+ include('end'),
+ (r'module\b', Keyword, '#push'),
+ (r'grammar\b', Keyword, 'grammar'),
+ (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace),
+ ],
+ 'grammar': [
+ include('space'),
+ include('end'),
+ (r'rule\b', Keyword, 'rule'),
+ (r'include\b', Keyword, 'include'),
+ (r'[A-Z]\w*', Name),
+ ],
+ 'include': [
+ include('space'),
+ (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'),
+ ],
+ 'rule': [
+ include('space'),
+ include('end'),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)),
+ (r'[A-Za-z_]\w*', Name),
+ (r'[()]', Punctuation),
+ (r'[?+*/&!~]', Operator),
+ (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
+ (r'([0-9]*)(\.\.)([0-9]*)',
+ bygroups(Number.Integer, Operator, Number.Integer)),
+ (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
+ (r'\{', Punctuation, 'inline_module'),
+ (r'\.', String.Regex),
+ ],
+ 'inline_module': [
+ (r'\{', Other, 'ruby'),
+ (r'\}', Punctuation, '#pop'),
+ (r'[^{}]+', Other),
+ ],
+ 'ruby': [
+ (r'\{', Other, '#push'),
+ (r'\}', Other, '#pop'),
+ (r'[^{}]+', Other),
+ ],
+ 'space': [
+ (r'[ \t\n\r]+', Whitespace),
+ (r'#[^\n]*', Comment.Single),
+ ],
+ 'end': [
+ (r'end\b', Keyword, '#pop'),
+ ],
+ }
+
+
+class TreetopLexer(DelegatingLexer):
+ """
+ A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
+
+ .. versionadded:: 1.6
+ """
+
+ name = 'Treetop'
+ aliases = ['treetop']
+ filenames = ['*.treetop', '*.tt']
+
+ def __init__(self, **options):
+ super().__init__(RubyLexer, TreetopBaseLexer, **options)
+
+
+class EbnfLexer(RegexLexer):
+ """
+ Lexer for `ISO/IEC 14977 EBNF
+ <http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
+ grammars.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'EBNF'
+ aliases = ['ebnf']
+ filenames = ['*.ebnf']
+ mimetypes = ['text/x-ebnf']
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ include('comment_start'),
+ include('identifier'),
+ (r'=', Operator, 'production'),
+ ],
+ 'production': [
+ include('whitespace'),
+ include('comment_start'),
+ include('identifier'),
+ (r'"[^"]*"', String.Double),
+ (r"'[^']*'", String.Single),
+ (r'(\?[^?]*\?)', Name.Entity),
+ (r'[\[\]{}(),|]', Punctuation),
+ (r'-', Operator),
+ (r';', Punctuation, '#pop'),
+ (r'\.', Punctuation, '#pop'),
+ ],
+ 'whitespace': [
+ (r'\s+', Text),
+ ],
+ 'comment_start': [
+ (r'\(\*', Comment.Multiline, 'comment'),
+ ],
+ 'comment': [
+ (r'[^*)]', Comment.Multiline),
+ include('comment_start'),
+ (r'\*\)', Comment.Multiline, '#pop'),
+ (r'[*)]', Comment.Multiline),
+ ],
+ 'identifier': [
+ (r'([a-zA-Z][\w \-]*)', Keyword),
+ ],
+ }
diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py
new file mode 100644
index 0000000..cab170d
--- /dev/null
+++ b/pygments/lexers/pascal.py
@@ -0,0 +1,641 @@
+"""
+ pygments.lexers.pascal
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Pascal family languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer
+from pygments.util import get_bool_opt, get_list_opt
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error, Whitespace
+from pygments.scanner import Scanner
+
+# compatibility import
+from pygments.lexers.modula2 import Modula2Lexer
+
+__all__ = ['DelphiLexer', 'PortugolLexer']
+
+
+class PortugolLexer(Lexer):
+ """For Portugol, a Pascal dialect with keywords in Portuguese."""
+ name = 'Portugol'
+ aliases = ['portugol']
+ filenames = ['*.alg', '*.portugol']
+ mimetypes = []
+ url = "https://www.apoioinformatica.inf.br/produtos/visualg/linguagem"
+
+ def __init__(self, **options):
+ Lexer.__init__(self, **options)
+ self.lexer = DelphiLexer(**options, portugol=True)
+
+ def get_tokens_unprocessed(self, text):
+ return self.lexer.get_tokens_unprocessed(text)
+
+
+class DelphiLexer(Lexer):
+ """
+ For Delphi (Borland Object Pascal),
+ Turbo Pascal and Free Pascal source code.
+
+ Additional options accepted:
+
+ `turbopascal`
+ Highlight Turbo Pascal specific keywords (default: ``True``).
+ `delphi`
+ Highlight Borland Delphi specific keywords (default: ``True``).
+ `freepascal`
+ Highlight Free Pascal specific keywords (default: ``True``).
+ `units`
+ A list of units that should be considered builtin, supported are
+ ``System``, ``SysUtils``, ``Classes`` and ``Math``.
+ Default is to consider all of them builtin.
+ """
+ name = 'Delphi'
+ aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
+ filenames = ['*.pas', '*.dpr']
+ mimetypes = ['text/x-pascal']
+
+ TURBO_PASCAL_KEYWORDS = (
+ 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
+ 'const', 'constructor', 'continue', 'destructor', 'div', 'do',
+ 'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
+ 'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
+ 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
+ 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
+ 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
+ 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
+ )
+
+ DELPHI_KEYWORDS = (
+ 'as', 'class', 'except', 'exports', 'finalization', 'finally',
+ 'initialization', 'is', 'library', 'on', 'property', 'raise',
+ 'threadvar', 'try'
+ )
+
+ FREE_PASCAL_KEYWORDS = (
+ 'dispose', 'exit', 'false', 'new', 'true'
+ )
+
+ BLOCK_KEYWORDS = {
+ 'begin', 'class', 'const', 'constructor', 'destructor', 'end',
+ 'finalization', 'function', 'implementation', 'initialization',
+ 'label', 'library', 'operator', 'procedure', 'program', 'property',
+ 'record', 'threadvar', 'type', 'unit', 'uses', 'var'
+ }
+
+ FUNCTION_MODIFIERS = {
+ 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
+ 'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
+ 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
+ 'override', 'assembler'
+ }
+
+    # XXX: these aren't global, but currently we know of no way to define
+    # them just for the type context.
+ DIRECTIVES = {
+ 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
+ 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
+ 'published', 'public'
+ }
+
+ BUILTIN_TYPES = {
+ 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
+ 'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
+ 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
+ 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
+ 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
+ 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
+ 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
+ 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
+ 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
+ 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
+ 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
+ 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
+ 'widechar', 'widestring', 'word', 'wordbool'
+ }
+
+ BUILTIN_UNITS = {
+ 'System': (
+ 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
+ 'append', 'arctan', 'assert', 'assigned', 'assignfile',
+ 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
+ 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
+ 'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
+ 'dispose', 'doubletocomp', 'endthread', 'enummodules',
+ 'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
+ 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
+ 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
+ 'findresourcehinstance', 'flush', 'frac', 'freemem',
+ 'get8087cw', 'getdir', 'getlasterror', 'getmem',
+ 'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
+ 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
+ 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
+ 'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
+ 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
+ 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
+ 'randomize', 'read', 'readln', 'reallocmem',
+ 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
+ 'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
+ 'set8087cw', 'setlength', 'setlinebreakstyle',
+ 'setmemorymanager', 'setstring', 'settextbuf',
+ 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
+ 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
+ 'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
+ 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
+ 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
+ 'utf8tounicode', 'val', 'vararrayredim', 'varclear',
+ 'widecharlentostring', 'widecharlentostrvar',
+ 'widechartostring', 'widechartostrvar',
+ 'widestringtoucs4string', 'write', 'writeln'
+ ),
+ 'SysUtils': (
+ 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
+ 'allocmem', 'ansicomparefilename', 'ansicomparestr',
+ 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
+ 'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
+ 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
+ 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
+ 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
+ 'ansistrscan', 'ansistrupper', 'ansiuppercase',
+ 'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
+ 'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
+ 'callterminateprocs', 'changefileext', 'charlength',
+ 'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
+ 'comparetext', 'createdir', 'createguid', 'currentyear',
+ 'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
+ 'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
+ 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
+ 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
+ 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
+ 'exceptionerrormessage', 'excludetrailingbackslash',
+ 'excludetrailingpathdelimiter', 'expandfilename',
+ 'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
+ 'extractfiledrive', 'extractfileext', 'extractfilename',
+ 'extractfilepath', 'extractrelativepath', 'extractshortpathname',
+ 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
+ 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
+ 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
+ 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
+ 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
+ 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
+ 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
+ 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
+ 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
+ 'getenvironmentvariable', 'getfileversion', 'getformatsettings',
+ 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
+ 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
+ 'includetrailingbackslash', 'includetrailingpathdelimiter',
+ 'incmonth', 'initializepackage', 'interlockeddecrement',
+ 'interlockedexchange', 'interlockedexchangeadd',
+ 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
+ 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
+ 'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
+ 'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
+ 'outofmemoryerror', 'quotedstr', 'raiselastoserror',
+ 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
+ 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
+ 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
+ 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
+ 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
+ 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
+ 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
+ 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
+ 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
+ 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
+ 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
+ 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
+ 'strtotimedef', 'strupper', 'supports', 'syserrormessage',
+ 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
+ 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
+ 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
+ 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
+ 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
+ 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
+ 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
+ 'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
+ 'wraptext'
+ ),
+ 'Classes': (
+ 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
+ 'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
+ 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
+ 'groupdescendantswith', 'hextobin', 'identtoint',
+ 'initinheritedcomponent', 'inttoident', 'invalidpoint',
+ 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
+ 'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
+ 'pointsequal', 'readcomponentres', 'readcomponentresex',
+ 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
+ 'registerclasses', 'registercomponents', 'registerintegerconsts',
+ 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
+ 'teststreamformat', 'unregisterclass', 'unregisterclasses',
+ 'unregisterintegerconsts', 'unregistermoduleclasses',
+ 'writecomponentresfile'
+ ),
+ 'Math': (
+ 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
+ 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
+ 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
+ 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
+ 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
+ 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
+ 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
+ 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
+ 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
+ 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
+ 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
+ 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
+ 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
+ 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
+ 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
+ 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
+ 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
+ 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
+ 'tan', 'tanh', 'totalvariance', 'variance'
+ )
+ }
+
+ ASM_REGISTERS = {
+ 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
+ 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
+ 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
+ 'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
+ 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
+ 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
+ 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
+ 'xmm6', 'xmm7'
+ }
+
+ ASM_INSTRUCTIONS = {
+ 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
+ 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
+ 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
+ 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
+ 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
+ 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
+ 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
+ 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
+ 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
+ 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
+ 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
+ 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
+ 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
+ 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
+ 'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
+ 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
+ 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
+ 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
+ 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
+ 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
+ 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
+ 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
+ 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
+ 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
+ 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
+ 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
+ 'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
+ 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
+ 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
+ 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
+ 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
+ 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
+ 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
+ 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
+ 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
+ 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
+ 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
+ 'xlatb', 'xor'
+ }
+
+ PORTUGOL_KEYWORDS = (
+ 'aleatorio',
+ 'algoritmo',
+ 'arquivo',
+ 'ate',
+ 'caso',
+ 'cronometro',
+ 'debug',
+ 'e',
+ 'eco',
+ 'enquanto',
+ 'entao',
+ 'escolha',
+ 'escreva',
+ 'escreval',
+ 'faca',
+ 'falso',
+ 'fimalgoritmo',
+ 'fimenquanto',
+ 'fimescolha',
+ 'fimfuncao',
+ 'fimpara',
+ 'fimprocedimento',
+ 'fimrepita',
+ 'fimse',
+ 'funcao',
+ 'inicio',
+ 'int',
+ 'interrompa',
+ 'leia',
+ 'limpatela',
+ 'mod',
+ 'nao',
+ 'ou',
+ 'outrocaso',
+ 'para',
+ 'passo',
+ 'pausa',
+ 'procedimento',
+ 'repita',
+ 'retorne',
+ 'se',
+ 'senao',
+ 'timer',
+ 'var',
+ 'vetor',
+ 'verdadeiro',
+ 'xou',
+ 'div',
+ 'mod',
+ 'abs',
+ 'arccos',
+ 'arcsen',
+ 'arctan',
+ 'cos',
+ 'cotan',
+ 'Exp',
+ 'grauprad',
+ 'int',
+ 'log',
+ 'logn',
+ 'pi',
+ 'quad',
+ 'radpgrau',
+ 'raizq',
+ 'rand',
+ 'randi',
+ 'sen',
+ 'Tan',
+ 'asc',
+ 'carac',
+ 'caracpnum',
+ 'compr',
+ 'copia',
+ 'maiusc',
+ 'minusc',
+ 'numpcarac',
+ 'pos',
+ )
+
+ PORTUGOL_BUILTIN_TYPES = {
+ 'inteiro', 'real', 'caractere', 'logico'
+ }
+
+ def __init__(self, **options):
+ Lexer.__init__(self, **options)
+ self.keywords = set()
+ self.builtins = set()
+ if get_bool_opt(options, 'portugol', False):
+ self.keywords.update(self.PORTUGOL_KEYWORDS)
+ self.builtins.update(self.PORTUGOL_BUILTIN_TYPES)
+ self.is_portugol = True
+ else:
+ self.is_portugol = False
+
+ if get_bool_opt(options, 'turbopascal', True):
+ self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
+ if get_bool_opt(options, 'delphi', True):
+ self.keywords.update(self.DELPHI_KEYWORDS)
+ if get_bool_opt(options, 'freepascal', True):
+ self.keywords.update(self.FREE_PASCAL_KEYWORDS)
+ for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
+ self.builtins.update(self.BUILTIN_UNITS[unit])
+
+ def get_tokens_unprocessed(self, text):
+ scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
+ stack = ['initial']
+ in_function_block = False
+ in_property_block = False
+ was_dot = False
+ next_token_is_function = False
+ next_token_is_property = False
+ collect_labels = False
+ block_labels = set()
+ brace_balance = [0, 0]
+
+ while not scanner.eos:
+ token = Error
+
+ if stack[-1] == 'initial':
+ if scanner.scan(r'\s+'):
+ token = Whitespace
+ elif not self.is_portugol and scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+ if scanner.match.startswith('$'):
+ token = Comment.Preproc
+ else:
+ token = Comment.Multiline
+ elif scanner.scan(r'//.*?$'):
+ token = Comment.Single
+ elif self.is_portugol and scanner.scan(r'(<\-)|(>=)|(<=)|%|<|>|-|\+|\*|\=|(<>)|\/|\.|:|,'):
+ token = Operator
+ elif not self.is_portugol and scanner.scan(r'[-+*\/=<>:;,.@\^]'):
+ token = Operator
+ # stop label highlighting on next ";"
+ if collect_labels and scanner.match == ';':
+ collect_labels = False
+ elif scanner.scan(r'[\(\)\[\]]+'):
+ token = Punctuation
+ # abort function naming ``foo = Function(...)``
+ next_token_is_function = False
+ # if we are in a function block we count the open
+                    # braces because otherwise it's impossible to
+ # determine the end of the modifier context
+ if in_function_block or in_property_block:
+ if scanner.match == '(':
+ brace_balance[0] += 1
+ elif scanner.match == ')':
+ brace_balance[0] -= 1
+ elif scanner.match == '[':
+ brace_balance[1] += 1
+ elif scanner.match == ']':
+ brace_balance[1] -= 1
+ elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+ lowercase_name = scanner.match.lower()
+ if lowercase_name == 'result':
+ token = Name.Builtin.Pseudo
+ elif lowercase_name in self.keywords:
+ token = Keyword
+                        # if we are in a special block and a
+                        # block-ending keyword occurs (and the parentheses
+                        # are balanced) we end the current block context
+ if self.is_portugol:
+ if lowercase_name in ('funcao', 'procedimento'):
+ in_function_block = True
+ next_token_is_function = True
+ else:
+ if (in_function_block or in_property_block) and \
+ lowercase_name in self.BLOCK_KEYWORDS and \
+ brace_balance[0] <= 0 and \
+ brace_balance[1] <= 0:
+ in_function_block = False
+ in_property_block = False
+ brace_balance = [0, 0]
+ block_labels = set()
+ if lowercase_name in ('label', 'goto'):
+ collect_labels = True
+ elif lowercase_name == 'asm':
+ stack.append('asm')
+ elif lowercase_name == 'property':
+ in_property_block = True
+ next_token_is_property = True
+ elif lowercase_name in ('procedure', 'operator',
+ 'function', 'constructor',
+ 'destructor'):
+ in_function_block = True
+ next_token_is_function = True
+ # we are in a function block and the current name
+ # is in the set of registered modifiers. highlight
+ # it as pseudo keyword
+ elif not self.is_portugol and in_function_block and \
+ lowercase_name in self.FUNCTION_MODIFIERS:
+ token = Keyword.Pseudo
+ # if we are in a property highlight some more
+ # modifiers
+ elif not self.is_portugol and in_property_block and \
+ lowercase_name in ('read', 'write'):
+ token = Keyword.Pseudo
+ next_token_is_function = True
+ # if the last iteration set next_token_is_function
+ # to true we now want this name highlighted as
+ # function. so do that and reset the state
+ elif next_token_is_function:
+ # Look if the next token is a dot. If yes it's
+ # not a function, but a class name and the
+ # part after the dot a function name
+ if not self.is_portugol and scanner.test(r'\s*\.\s*'):
+ token = Name.Class
+ # it's not a dot, our job is done
+ else:
+ token = Name.Function
+ next_token_is_function = False
+
+ if self.is_portugol:
+ block_labels.add(scanner.match.lower())
+
+ # same for properties
+ elif not self.is_portugol and next_token_is_property:
+ token = Name.Property
+ next_token_is_property = False
+ # Highlight this token as label and add it
+ # to the list of known labels
+ elif not self.is_portugol and collect_labels:
+ token = Name.Label
+ block_labels.add(scanner.match.lower())
+ # name is in list of known labels
+ elif lowercase_name in block_labels:
+ token = Name.Label
+ elif self.is_portugol and lowercase_name in self.PORTUGOL_BUILTIN_TYPES:
+ token = Keyword.Type
+ elif not self.is_portugol and lowercase_name in self.BUILTIN_TYPES:
+ token = Keyword.Type
+ elif not self.is_portugol and lowercase_name in self.DIRECTIVES:
+ token = Keyword.Pseudo
+ # builtins are just builtins if the token
+ # before isn't a dot
+ elif not self.is_portugol and not was_dot and lowercase_name in self.builtins:
+ token = Name.Builtin
+ else:
+ token = Name
+ elif self.is_portugol and scanner.scan(r"\""):
+ token = String
+ stack.append('string')
+ elif not self.is_portugol and scanner.scan(r"'"):
+ token = String
+ stack.append('string')
+ elif not self.is_portugol and scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
+ token = String.Char
+ elif not self.is_portugol and scanner.scan(r'\$[0-9A-Fa-f]+'):
+ token = Number.Hex
+ elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+ token = Number.Integer
+ elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+ token = Number.Float
+ else:
+ # if the stack depth is deeper than once, pop
+ if len(stack) > 1:
+ stack.pop()
+ scanner.get_char()
+
+ elif stack[-1] == 'string':
+ if self.is_portugol:
+ if scanner.scan(r"''"):
+ token = String.Escape
+ elif scanner.scan(r"\""):
+ token = String
+ stack.pop()
+ elif scanner.scan(r"[^\"]*"):
+ token = String
+ else:
+ scanner.get_char()
+ stack.pop()
+ else:
+ if scanner.scan(r"''"):
+ token = String.Escape
+ elif scanner.scan(r"'"):
+ token = String
+ stack.pop()
+ elif scanner.scan(r"[^']*"):
+ token = String
+ else:
+ scanner.get_char()
+ stack.pop()
+ elif not self.is_portugol and stack[-1] == 'asm':
+ if scanner.scan(r'\s+'):
+ token = Whitespace
+ elif scanner.scan(r'end'):
+ token = Keyword
+ stack.pop()
+ elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+ if scanner.match.startswith('$'):
+ token = Comment.Preproc
+ else:
+ token = Comment.Multiline
+ elif scanner.scan(r'//.*?$'):
+ token = Comment.Single
+ elif scanner.scan(r"'"):
+ token = String
+ stack.append('string')
+ elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
+ token = Name.Label
+ elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+ lowercase_name = scanner.match.lower()
+ if lowercase_name in self.ASM_INSTRUCTIONS:
+ token = Keyword
+ elif lowercase_name in self.ASM_REGISTERS:
+ token = Name.Builtin
+ else:
+ token = Name
+ elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
+ token = Operator
+ elif scanner.scan(r'[\(\)\[\]]+'):
+ token = Punctuation
+ elif scanner.scan(r'\$[0-9A-Fa-f]+'):
+ token = Number.Hex
+ elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+ token = Number.Integer
+ elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+ token = Number.Float
+ else:
+ scanner.get_char()
+ stack.pop()
+
+            # remember whether the last non-whitespace token was a dot
+ if not self.is_portugol and scanner.match.strip():
+ was_dot = scanner.match == '.'
+
+ yield scanner.start_pos, token, scanner.match or ''
diff --git a/pygments/lexers/pawn.py b/pygments/lexers/pawn.py
new file mode 100644
index 0000000..41b19da
--- /dev/null
+++ b/pygments/lexers/pawn.py
@@ -0,0 +1,202 @@
+"""
+ pygments.lexers.pawn
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Pawn languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+from pygments.util import get_bool_opt
+
+__all__ = ['SourcePawnLexer', 'PawnLexer']
+
+
+class SourcePawnLexer(RegexLexer):
+ """
+ For SourcePawn source code with preprocessor directives.
+
+ .. versionadded:: 1.6
+ """
+ name = 'SourcePawn'
+ aliases = ['sp']
+ filenames = ['*.sp']
+ mimetypes = ['text/x-sourcepawn']
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
+ #: only one /* */ style comment
+ _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
+
+ tokens = {
+ 'root': [
+ # preprocessor directives: without whitespace
+ (r'^#if\s+0', Comment.Preproc, 'if0'),
+ ('^#', Comment.Preproc, 'macro'),
+ # or with whitespace
+ ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
+ ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
+ (r'[{}]', Punctuation),
+ (r'L?"', String, 'string'),
+ (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
+ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+ (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
+ (r'0[0-7]+[LlUu]*', Number.Oct),
+ (r'\d+[LlUu]*', Number.Integer),
+ (r'[~!%^&*+=|?:<>/-]', Operator),
+ (r'[()\[\],.;]', Punctuation),
+ (r'(case|const|continue|native|'
+ r'default|else|enum|for|if|new|operator|'
+ r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
+ (r'(bool|Float)\b', Keyword.Type),
+ (r'(true|false)\b', Keyword.Constant),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'\\\n', String), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ 'macro': [
+ (r'[^/\n]+', Comment.Preproc),
+ (r'/\*(.|\n)*?\*/', Comment.Multiline),
+ (r'//.*?\n', Comment.Single, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'if0': [
+ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
+ (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+ (r'.*?\n', Comment),
+ ]
+ }
+
+ SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any',
+ 'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
+ 'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
+ 'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
+ 'ConVarBounds', 'QueryCookie', 'ReplySource',
+ 'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
+ 'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
+ 'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
+ 'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
+ 'EventHook', 'FileType', 'FileTimeMode', 'PathType',
+ 'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
+ 'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
+ 'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
+ 'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
+ 'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
+ 'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
+ 'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
+ 'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
+ 'TopMenuPosition', 'TopMenuObject', 'UserMsg'}
+
+ def __init__(self, **options):
+ self.smhighlighting = get_bool_opt(options,
+ 'sourcemod', True)
+
+ self._functions = set()
+ if self.smhighlighting:
+ from pygments.lexers._sourcemod_builtins import FUNCTIONS
+ self._functions.update(FUNCTIONS)
+ RegexLexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in \
+ RegexLexer.get_tokens_unprocessed(self, text):
+ if token is Name:
+ if self.smhighlighting:
+ if value in self.SM_TYPES:
+ token = Keyword.Type
+ elif value in self._functions:
+ token = Name.Builtin
+ yield index, token, value
+
+
+class PawnLexer(RegexLexer):
+ """
+ For Pawn source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Pawn'
+ aliases = ['pawn']
+ filenames = ['*.p', '*.pwn', '*.inc']
+ mimetypes = ['text/x-pawn']
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
+ #: only one /* */ style comment
+ _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
+
+ tokens = {
+ 'root': [
+ # preprocessor directives: without whitespace
+ (r'^#if\s+0', Comment.Preproc, 'if0'),
+ ('^#', Comment.Preproc, 'macro'),
+ # or with whitespace
+ ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
+ ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
+ (r'[{}]', Punctuation),
+ (r'L?"', String, 'string'),
+ (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
+ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+ (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
+ (r'0[0-7]+[LlUu]*', Number.Oct),
+ (r'\d+[LlUu]*', Number.Integer),
+ (r'[~!%^&*+=|?:<>/-]', Operator),
+ (r'[()\[\],.;]', Punctuation),
+ (r'(switch|case|default|const|new|static|char|continue|break|'
+ r'if|else|for|while|do|operator|enum|'
+ r'public|return|sizeof|tagof|state|goto)\b', Keyword),
+ (r'(bool|Float)\b', Keyword.Type),
+ (r'(true|false)\b', Keyword.Constant),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'\\\n', String), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ 'macro': [
+ (r'[^/\n]+', Comment.Preproc),
+ (r'/\*(.|\n)*?\*/', Comment.Multiline),
+ (r'//.*?\n', Comment.Single, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'if0': [
+ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
+ (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+ (r'.*?\n', Comment),
+ ]
+ }
+
+ def analyse_text(text):
+ """This is basically C. There is a keyword which doesn't exist in C
+ though and is nearly unique to this language."""
+ if 'tagof' in text:
+ return 0.01
diff --git a/pygments/lexers/perl.py b/pygments/lexers/perl.py
new file mode 100644
index 0000000..d733794
--- /dev/null
+++ b/pygments/lexers/perl.py
@@ -0,0 +1,733 @@
+"""
+ pygments.lexers.perl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Perl, Raku and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
+ using, this, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+from pygments.util import shebang_matches
+
+__all__ = ['PerlLexer', 'Perl6Lexer']
+
+
class PerlLexer(RegexLexer):
    """
    For Perl source code.
    """

    name = 'Perl'
    url = 'https://www.perl.org'
    aliases = ['perl', 'pl']
    filenames = ['*.pl', '*.pm', '*.t', '*.perl']
    mimetypes = ['text/x-perl', 'application/x-perl']

    flags = re.DOTALL | re.MULTILINE
    # TODO: give this to a perl guy who knows how to parse perl...
    tokens = {
        # Consumes one delimited regex body (plus trailing flags) for each
        # supported delimiter, then returns to the calling state.
        'balanced-regex': [
            (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'),
            (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'),
            (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
            (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'),
            (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'),
            (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'),
            (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'),
            (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'),
            (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'),
            (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'),
        ],
        'root': [
            (r'\A\#!.+?$', Comment.Hashbang),
            (r'\#.*?$', Comment.Single),
            # POD documentation block, terminated by =cut
            (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
            (words((
                'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach',
                'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then',
                'unless', 'until', 'while', 'print', 'new', 'BEGIN',
                'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'),
             Keyword),
            (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)',
             bygroups(Keyword, Whitespace, Name, Whitespace, Punctuation, Whitespace), 'format'),
            (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
            # common delimiters
            (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*',
             String.Regex),
            (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
            (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
            (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*',
             String.Regex),
            (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*',
             String.Regex),
            # balanced delimiters
            (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'),
            (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'),
            (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex,
             'balanced-regex'),
            (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex,
             'balanced-regex'),

            (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex),
            (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
            # a bare /regex/ is only recognized after =~ or an opening paren
            (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*',
             String.Regex),
            (r'\s+', Whitespace),
            (words((
                'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir',
                'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect',
                'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die',
                'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent',
                'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl',
                'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid',
                'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin',
                'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp',
                'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber',
                'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname',
                'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime',
                'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last',
                'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat',
                'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'oct', 'open',
                'opendir', 'ord', 'our', 'pack', 'pipe', 'pop', 'pos', 'printf',
                'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir',
                'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename',
                'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir',
                'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent',
                'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent',
                'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown',
                'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt',
                'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread',
                'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr',
                'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie',
                'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'),
             Name.Builtin),
            (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
            # heredoc: <<TAG / <<'TAG' / <<"TAG" ... TAG
            (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)',
             bygroups(String, String, String.Delimiter, String, String.Delimiter, Whitespace)),
            (r'__END__', Comment.Preproc, 'end-part'),
            (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
            (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
            (r'[$@%#]+', Name.Variable, 'varname'),
            (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
            (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
            (r'0b[01]+(_[01]+)*', Number.Bin),
            (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
             Number.Float),
            (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
            (r'\d+(_\d+)*', Number.Integer),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick),
            (r'<([^\s>]+)>', String.Regex),
            # q/qq/qw/qr/qx quote-like operators with bracketing delimiters
            (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
            (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
            (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
            (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
            (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other),
            (r'(package)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'(use|require|no)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'(sub)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
            (words((
                'no', 'package', 'require', 'use'), suffix=r'\b'),
             Keyword),
            (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&^|!\\~]=?', Operator),
            (r'[()\[\]:;,<>/?{}]', Punctuation),  # yes, there's no shortage
                                                  # of punctuation in Perl!
            (r'(?=\w)', Name, 'name'),
        ],
        # picture lines of a 'format' declaration, ended by a lone '.'
        'format': [
            (r'\.\n', String.Interpol, '#pop'),
            (r'[^\n]*\n', String.Interpol),
        ],
        'varname': [
            (r'\s+', Whitespace),
            (r'\{', Punctuation, '#pop'),    # hash syntax?
            (r'\)|,', Punctuation, '#pop'),  # argument specifier
            (r'\w+::', Name.Namespace),
            (r'[\w:]+', Name.Variable, '#pop'),
        ],
        'name': [
            (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*(::)?(?=\s*->)', Name.Namespace, '#pop'),
            (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*::', Name.Namespace, '#pop'),
            (r'[\w:]+', Name, '#pop'),
            (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'),
            (r'(?=\W)', Text, '#pop'),
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*[!?]?', Name.Function),
            (r'\s+', Whitespace),
            # argument declaration
            (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Whitespace)),
            (r';', Punctuation, '#pop'),
            (r'.*?\{', Punctuation, '#pop'),
        ],
        # the four bracketed q-string bodies; each recurses on its own
        # opening bracket so nested brackets stay balanced
        'cb-string': [
            (r'\\[{}\\]', String.Other),
            (r'\\', String.Other),
            (r'\{', String.Other, 'cb-string'),
            (r'\}', String.Other, '#pop'),
            (r'[^{}\\]+', String.Other)
        ],
        'rb-string': [
            (r'\\[()\\]', String.Other),
            (r'\\', String.Other),
            (r'\(', String.Other, 'rb-string'),
            (r'\)', String.Other, '#pop'),
            (r'[^()]+', String.Other)
        ],
        'sb-string': [
            (r'\\[\[\]\\]', String.Other),
            (r'\\', String.Other),
            (r'\[', String.Other, 'sb-string'),
            (r'\]', String.Other, '#pop'),
            (r'[^\[\]]+', String.Other)
        ],
        'lt-string': [
            (r'\\[<>\\]', String.Other),
            (r'\\', String.Other),
            (r'\<', String.Other, 'lt-string'),
            (r'\>', String.Other, '#pop'),
            (r'[^<>]+', String.Other)
        ],
        # everything after __END__ (re.DOTALL makes .+ span lines)
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ]
    }

    def analyse_text(text):
        """Score the likelihood that *text* is Perl source."""
        if shebang_matches(text, r'perl'):
            return True

        result = 0

        if re.search(r'(?:my|our)\s+[$@%(]', text):
            result += 0.9

        if ':=' in text:
            # := is not valid Perl, but it appears in unicon, so we should
            # become less confident if we think we found Perl with :=
            result /= 2

        return result
+
+
class Perl6Lexer(ExtendedRegexLexer):
    """
    For Raku (a.k.a. Perl 6) source code.

    .. versionadded:: 2.0
    """

    name = 'Perl6'
    url = 'https://www.raku.org'
    aliases = ['perl6', 'pl6', 'raku']
    filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
                 '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod',
                 '*.rakutest', '*.rakudoc']
    mimetypes = ['text/x-perl6', 'application/x-perl6']
    flags = re.MULTILINE | re.DOTALL

    # Character class fragment for characters allowed inside identifiers;
    # used both to build word boundaries and to match identifier tails.
    PERL6_IDENTIFIER_RANGE = r"['\w:-]"

    PERL6_KEYWORDS = (
        # Phasers
        'BEGIN','CATCH','CHECK','CLOSE','CONTROL','DOC','END','ENTER','FIRST',
        'INIT','KEEP','LAST','LEAVE','NEXT','POST','PRE','QUIT','UNDO',
        # Keywords
        'anon','augment','but','class','constant','default','does','else',
        'elsif','enum','for','gather','given','grammar','has','if','import',
        'is','let','loop','made','make','method','module','multi','my','need',
        'orwith','our','proceed','proto','repeat','require','return',
        'return-rw','returns','role','rule','state','sub','submethod','subset',
        'succeed','supersede','token','try','unit','unless','until','use',
        'when','while','with','without',
        # Traits
        'export','native','repr','required','rw','symbol',
    )

    PERL6_BUILTINS = (
        'ACCEPTS','abs','abs2rel','absolute','accept','accessed','acos',
        'acosec','acosech','acosh','acotan','acotanh','acquire','act','action',
        'actions','add','add_attribute','add_enum_value','add_fallback',
        'add_method','add_parent','add_private_method','add_role','add_trustee',
        'adverb','after','all','allocate','allof','allowed','alternative-names',
        'annotations','antipair','antipairs','any','anyof','app_lifetime',
        'append','arch','archname','args','arity','Array','asec','asech','asin',
        'asinh','ASSIGN-KEY','ASSIGN-POS','assuming','ast','at','atan','atan2',
        'atanh','AT-KEY','atomic-assign','atomic-dec-fetch','atomic-fetch',
        'atomic-fetch-add','atomic-fetch-dec','atomic-fetch-inc',
        'atomic-fetch-sub','atomic-inc-fetch','AT-POS','attributes','auth',
        'await','backtrace','Bag','BagHash','bail-out','base','basename',
        'base-repeating','batch','BIND-KEY','BIND-POS','bind-stderr',
        'bind-stdin','bind-stdout','bind-udp','bits','bless','block','Bool',
        'bool-only','bounds','break','Bridge','broken','BUILD','build-date',
        'bytes','cache','callframe','calling-package','CALL-ME','callsame',
        'callwith','can','cancel','candidates','cando','can-ok','canonpath',
        'caps','caption','Capture','cas','catdir','categorize','categorize-list',
        'catfile','catpath','cause','ceiling','cglobal','changed','Channel',
        'chars','chdir','child','child-name','child-typename','chmod','chomp',
        'chop','chr','chrs','chunks','cis','classify','classify-list','cleanup',
        'clone','close','closed','close-stdin','cmp-ok','code','codes','collate',
        'column','comb','combinations','command','comment','compiler','Complex',
        'compose','compose_type','composer','condition','config',
        'configure_destroy','configure_type_checking','conj','connect',
        'constraints','construct','contains','contents','copy','cos','cosec',
        'cosech','cosh','cotan','cotanh','count','count-only','cpu-cores',
        'cpu-usage','CREATE','create_type','cross','cue','curdir','curupdir','d',
        'Date','DateTime','day','daycount','day-of-month','day-of-week',
        'day-of-year','days-in-month','declaration','decode','decoder','deepmap',
        'default','defined','DEFINITE','delayed','DELETE-KEY','DELETE-POS',
        'denominator','desc','DESTROY','destroyers','devnull','diag',
        'did-you-mean','die','dies-ok','dir','dirname','dir-sep','DISTROnames',
        'do','does','does-ok','done','done-testing','duckmap','dynamic','e',
        'eager','earlier','elems','emit','enclosing','encode','encoder',
        'encoding','end','ends-with','enum_from_value','enum_value_list',
        'enum_values','enums','eof','EVAL','eval-dies-ok','EVALFILE',
        'eval-lives-ok','exception','excludes-max','excludes-min','EXISTS-KEY',
        'EXISTS-POS','exit','exitcode','exp','expected','explicitly-manage',
        'expmod','extension','f','fail','fails-like','fc','feature','file',
        'filename','find_method','find_method_qualified','finish','first','flat',
        'flatmap','flip','floor','flunk','flush','fmt','format','formatter',
        'freeze','from','from-list','from-loop','from-posix','full',
        'full-barrier','get','get_value','getc','gist','got','grab','grabpairs',
        'grep','handle','handled','handles','hardware','has_accessor','Hash',
        'head','headers','hh-mm-ss','hidden','hides','hour','how','hyper','id',
        'illegal','im','in','indent','index','indices','indir','infinite',
        'infix','infix:<+>','infix:<->','install_method_cache','Instant',
        'instead','Int','int-bounds','interval','in-timezone','invalid-str',
        'invert','invocant','IO','IO::Notification.watch-path','is_trusted',
        'is_type','isa','is-absolute','isa-ok','is-approx','is-deeply',
        'is-hidden','is-initial-thread','is-int','is-lazy','is-leap-year',
        'isNaN','isnt','is-prime','is-relative','is-routine','is-setting',
        'is-win','item','iterator','join','keep','kept','KERNELnames','key',
        'keyof','keys','kill','kv','kxxv','l','lang','last','lastcall','later',
        'lazy','lc','leading','level','like','line','lines','link','List',
        'listen','live','lives-ok','local','lock','log','log10','lookup','lsb',
        'made','MAIN','make','Map','match','max','maxpairs','merge','message',
        'method','method_table','methods','migrate','min','minmax','minpairs',
        'minute','misplaced','Mix','MixHash','mkdir','mode','modified','month',
        'move','mro','msb','multi','multiness','my','name','named','named_names',
        'narrow','nativecast','native-descriptor','nativesizeof','new','new_type',
        'new-from-daycount','new-from-pairs','next','nextcallee','next-handle',
        'nextsame','nextwith','NFC','NFD','NFKC','NFKD','nl-in','nl-out',
        'nodemap','nok','none','norm','not','note','now','nude','Num',
        'numerator','Numeric','of','offset','offset-in-hours','offset-in-minutes',
        'ok','old','on-close','one','on-switch','open','opened','operation',
        'optional','ord','ords','orig','os-error','osname','out-buffer','pack',
        'package','package-kind','package-name','packages','pair','pairs',
        'pairup','parameter','params','parent','parent-name','parents','parse',
        'parse-base','parsefile','parse-names','parts','pass','path','path-sep',
        'payload','peer-host','peer-port','periods','perl','permutations','phaser',
        'pick','pickpairs','pid','placeholder','plan','plus','polar','poll',
        'polymod','pop','pos','positional','posix','postfix','postmatch',
        'precomp-ext','precomp-target','pred','prefix','prematch','prepend',
        'print','printf','print-nl','print-to','private','private_method_table',
        'proc','produce','Promise','prompt','protect','pull-one','push',
        'push-all','push-at-least','push-exactly','push-until-lazy','put',
        'qualifier-type','quit','r','race','radix','rand','range','Rat','raw',
        're','read','readchars','readonly','ready','Real','reallocate','reals',
        'reason','rebless','receive','recv','redispatcher','redo','reduce',
        'rel2abs','relative','release','rename','repeated','replacement',
        'report','reserved','resolve','restore','result','resume','rethrow',
        'reverse','right','rindex','rmdir','role','roles_to_compose','rolish',
        'roll','rootdir','roots','rotate','rotor','round','roundrobin',
        'routine-type','run','rwx','s','samecase','samemark','samewith','say',
        'schedule-on','scheduler','scope','sec','sech','second','seek','self',
        'send','Set','set_hidden','set_name','set_package','set_rw','set_value',
        'SetHash','set-instruments','setup_finalization','shape','share','shell',
        'shift','sibling','sigil','sign','signal','signals','signature','sin',
        'sinh','sink','sink-all','skip','skip-at-least','skip-at-least-pull-one',
        'skip-one','skip-rest','sleep','sleep-timer','sleep-until','Slip','slurp',
        'slurp-rest','slurpy','snap','snapper','so','socket-host','socket-port',
        'sort','source','source-package','spawn','SPEC','splice','split',
        'splitdir','splitpath','sprintf','spurt','sqrt','squish','srand','stable',
        'start','started','starts-with','status','stderr','stdout','Str',
        'sub_signature','subbuf','subbuf-rw','subname','subparse','subst',
        'subst-mutate','substr','substr-eq','substr-rw','subtest','succ','sum',
        'Supply','symlink','t','tail','take','take-rw','tan','tanh','tap',
        'target','target-name','tc','tclc','tell','then','throttle','throw',
        'throws-like','timezone','tmpdir','to','today','todo','toggle','to-posix',
        'total','trailing','trans','tree','trim','trim-leading','trim-trailing',
        'truncate','truncated-to','trusts','try_acquire','trying','twigil','type',
        'type_captures','typename','uc','udp','uncaught_handler','unimatch',
        'uniname','uninames','uniparse','uniprop','uniprops','unique','unival',
        'univals','unlike','unlink','unlock','unpack','unpolar','unshift',
        'unwrap','updir','USAGE','use-ok','utc','val','value','values','VAR',
        'variable','verbose-config','version','VMnames','volume','vow','w','wait',
        'warn','watch','watch-path','week','weekday-of-month','week-number',
        'week-year','WHAT','when','WHERE','WHEREFORE','WHICH','WHO',
        'whole-second','WHY','wordcase','words','workaround','wrap','write',
        'write-to','x','yada','year','yield','yyyy-mm-dd','z','zip','zip-latest',

    )

    PERL6_BUILTIN_CLASSES = (
        # Booleans
        'False','True',
        # Classes
        'Any','Array','Associative','AST','atomicint','Attribute','Backtrace',
        'Backtrace::Frame','Bag','Baggy','BagHash','Blob','Block','Bool','Buf',
        'Callable','CallFrame','Cancellation','Capture','CArray','Channel','Code',
        'compiler','Complex','ComplexStr','Cool','CurrentThreadScheduler',
        'Cursor','Date','Dateish','DateTime','Distro','Duration','Encoding',
        'Exception','Failure','FatRat','Grammar','Hash','HyperWhatever','Instant',
        'Int','int16','int32','int64','int8','IntStr','IO','IO::ArgFiles',
        'IO::CatHandle','IO::Handle','IO::Notification','IO::Path',
        'IO::Path::Cygwin','IO::Path::QNX','IO::Path::Unix','IO::Path::Win32',
        'IO::Pipe','IO::Socket','IO::Socket::Async','IO::Socket::INET','IO::Spec',
        'IO::Spec::Cygwin','IO::Spec::QNX','IO::Spec::Unix','IO::Spec::Win32',
        'IO::Special','Iterable','Iterator','Junction','Kernel','Label','List',
        'Lock','Lock::Async','long','longlong','Macro','Map','Match',
        'Metamodel::AttributeContainer','Metamodel::C3MRO','Metamodel::ClassHOW',
        'Metamodel::EnumHOW','Metamodel::Finalization','Metamodel::MethodContainer',
        'Metamodel::MROBasedMethodDispatch','Metamodel::MultipleInheritance',
        'Metamodel::Naming','Metamodel::Primitives','Metamodel::PrivateMethodContainer',
        'Metamodel::RoleContainer','Metamodel::Trusting','Method','Mix','MixHash',
        'Mixy','Mu','NFC','NFD','NFKC','NFKD','Nil','Num','num32','num64',
        'Numeric','NumStr','ObjAt','Order','Pair','Parameter','Perl','Pod::Block',
        'Pod::Block::Code','Pod::Block::Comment','Pod::Block::Declarator',
        'Pod::Block::Named','Pod::Block::Para','Pod::Block::Table','Pod::Heading',
        'Pod::Item','Pointer','Positional','PositionalBindFailover','Proc',
        'Proc::Async','Promise','Proxy','PseudoStash','QuantHash','Range','Rat',
        'Rational','RatStr','Real','Regex','Routine','Scalar','Scheduler',
        'Semaphore','Seq','Set','SetHash','Setty','Signature','size_t','Slip',
        'Stash','Str','StrDistance','Stringy','Sub','Submethod','Supplier',
        'Supplier::Preserving','Supply','Systemic','Tap','Telemetry',
        'Telemetry::Instrument::Thread','Telemetry::Instrument::Usage',
        'Telemetry::Period','Telemetry::Sampler','Thread','ThreadPoolScheduler',
        'UInt','uint16','uint32','uint64','uint8','Uni','utf8','Variable',
        'Version','VM','Whatever','WhateverCode','WrapHandle'
    )

    PERL6_OPERATORS = (
        'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div',
        'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm',
        'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx',
        '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^',
        '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&',
        'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^',
        '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^',
        '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv',
        '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so',
        'not', '<==', '==>', '<<==', '==>>','unicmp',
    )

    # Perl 6 has a *lot* of possible bracketing characters
    # this list was lifted from STD.pm6 (https://github.com/perl6/std)
    # Maps each opening bracket to its closing counterpart; consumed by
    # brackets_callback() below.
    PERL6_BRACKETS = {
        '\u0028': '\u0029', '\u003c': '\u003e', '\u005b': '\u005d',
        '\u007b': '\u007d', '\u00ab': '\u00bb', '\u0f3a': '\u0f3b',
        '\u0f3c': '\u0f3d', '\u169b': '\u169c', '\u2018': '\u2019',
        '\u201a': '\u2019', '\u201b': '\u2019', '\u201c': '\u201d',
        '\u201e': '\u201d', '\u201f': '\u201d', '\u2039': '\u203a',
        '\u2045': '\u2046', '\u207d': '\u207e', '\u208d': '\u208e',
        '\u2208': '\u220b', '\u2209': '\u220c', '\u220a': '\u220d',
        '\u2215': '\u29f5', '\u223c': '\u223d', '\u2243': '\u22cd',
        '\u2252': '\u2253', '\u2254': '\u2255', '\u2264': '\u2265',
        '\u2266': '\u2267', '\u2268': '\u2269', '\u226a': '\u226b',
        '\u226e': '\u226f', '\u2270': '\u2271', '\u2272': '\u2273',
        '\u2274': '\u2275', '\u2276': '\u2277', '\u2278': '\u2279',
        '\u227a': '\u227b', '\u227c': '\u227d', '\u227e': '\u227f',
        '\u2280': '\u2281', '\u2282': '\u2283', '\u2284': '\u2285',
        '\u2286': '\u2287', '\u2288': '\u2289', '\u228a': '\u228b',
        '\u228f': '\u2290', '\u2291': '\u2292', '\u2298': '\u29b8',
        '\u22a2': '\u22a3', '\u22a6': '\u2ade', '\u22a8': '\u2ae4',
        '\u22a9': '\u2ae3', '\u22ab': '\u2ae5', '\u22b0': '\u22b1',
        '\u22b2': '\u22b3', '\u22b4': '\u22b5', '\u22b6': '\u22b7',
        '\u22c9': '\u22ca', '\u22cb': '\u22cc', '\u22d0': '\u22d1',
        '\u22d6': '\u22d7', '\u22d8': '\u22d9', '\u22da': '\u22db',
        '\u22dc': '\u22dd', '\u22de': '\u22df', '\u22e0': '\u22e1',
        '\u22e2': '\u22e3', '\u22e4': '\u22e5', '\u22e6': '\u22e7',
        '\u22e8': '\u22e9', '\u22ea': '\u22eb', '\u22ec': '\u22ed',
        '\u22f0': '\u22f1', '\u22f2': '\u22fa', '\u22f3': '\u22fb',
        '\u22f4': '\u22fc', '\u22f6': '\u22fd', '\u22f7': '\u22fe',
        '\u2308': '\u2309', '\u230a': '\u230b', '\u2329': '\u232a',
        '\u23b4': '\u23b5', '\u2768': '\u2769', '\u276a': '\u276b',
        '\u276c': '\u276d', '\u276e': '\u276f', '\u2770': '\u2771',
        '\u2772': '\u2773', '\u2774': '\u2775', '\u27c3': '\u27c4',
        '\u27c5': '\u27c6', '\u27d5': '\u27d6', '\u27dd': '\u27de',
        '\u27e2': '\u27e3', '\u27e4': '\u27e5', '\u27e6': '\u27e7',
        '\u27e8': '\u27e9', '\u27ea': '\u27eb', '\u2983': '\u2984',
        '\u2985': '\u2986', '\u2987': '\u2988', '\u2989': '\u298a',
        '\u298b': '\u298c', '\u298d': '\u298e', '\u298f': '\u2990',
        '\u2991': '\u2992', '\u2993': '\u2994', '\u2995': '\u2996',
        '\u2997': '\u2998', '\u29c0': '\u29c1', '\u29c4': '\u29c5',
        '\u29cf': '\u29d0', '\u29d1': '\u29d2', '\u29d4': '\u29d5',
        '\u29d8': '\u29d9', '\u29da': '\u29db', '\u29f8': '\u29f9',
        '\u29fc': '\u29fd', '\u2a2b': '\u2a2c', '\u2a2d': '\u2a2e',
        '\u2a34': '\u2a35', '\u2a3c': '\u2a3d', '\u2a64': '\u2a65',
        '\u2a79': '\u2a7a', '\u2a7d': '\u2a7e', '\u2a7f': '\u2a80',
        '\u2a81': '\u2a82', '\u2a83': '\u2a84', '\u2a8b': '\u2a8c',
        '\u2a91': '\u2a92', '\u2a93': '\u2a94', '\u2a95': '\u2a96',
        '\u2a97': '\u2a98', '\u2a99': '\u2a9a', '\u2a9b': '\u2a9c',
        '\u2aa1': '\u2aa2', '\u2aa6': '\u2aa7', '\u2aa8': '\u2aa9',
        '\u2aaa': '\u2aab', '\u2aac': '\u2aad', '\u2aaf': '\u2ab0',
        '\u2ab3': '\u2ab4', '\u2abb': '\u2abc', '\u2abd': '\u2abe',
        '\u2abf': '\u2ac0', '\u2ac1': '\u2ac2', '\u2ac3': '\u2ac4',
        '\u2ac5': '\u2ac6', '\u2acd': '\u2ace', '\u2acf': '\u2ad0',
        '\u2ad1': '\u2ad2', '\u2ad3': '\u2ad4', '\u2ad5': '\u2ad6',
        '\u2aec': '\u2aed', '\u2af7': '\u2af8', '\u2af9': '\u2afa',
        '\u2e02': '\u2e03', '\u2e04': '\u2e05', '\u2e09': '\u2e0a',
        '\u2e0c': '\u2e0d', '\u2e1c': '\u2e1d', '\u2e20': '\u2e21',
        '\u3008': '\u3009', '\u300a': '\u300b', '\u300c': '\u300d',
        '\u300e': '\u300f', '\u3010': '\u3011', '\u3014': '\u3015',
        '\u3016': '\u3017', '\u3018': '\u3019', '\u301a': '\u301b',
        '\u301d': '\u301e', '\ufd3e': '\ufd3f', '\ufe17': '\ufe18',
        '\ufe35': '\ufe36', '\ufe37': '\ufe38', '\ufe39': '\ufe3a',
        '\ufe3b': '\ufe3c', '\ufe3d': '\ufe3e', '\ufe3f': '\ufe40',
        '\ufe41': '\ufe42', '\ufe43': '\ufe44', '\ufe47': '\ufe48',
        '\ufe59': '\ufe5a', '\ufe5b': '\ufe5c', '\ufe5d': '\ufe5e',
        '\uff08': '\uff09', '\uff1c': '\uff1e', '\uff3b': '\uff3d',
        '\uff5b': '\uff5d', '\uff5f': '\uff60', '\uff62': '\uff63',
    }
+
+ def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''):
+ if boundary_regex_fragment is None:
+ return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \
+ suffix + r')\b'
+ else:
+ return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \
+ r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \
+ boundary_regex_fragment + r')'
+
    def brackets_callback(token_class):
        """Return an ExtendedRegexLexer callback that consumes one bracketed
        quote-like construct (e.g. ``q{...}``, ``m<...>``, ``#`(...)``) as a
        single token of *token_class*.

        The match must provide a ``delimiter`` group (one or more copies of
        the opening character) and may provide an ``adverbs`` group; a
        ``:to`` adverb extends the span to the heredoc terminator line.
        """
        def callback(lexer, match, context):
            groups = match.groupdict()
            opening_chars = groups['delimiter']
            n_chars = len(opening_chars)
            adverbs = groups.get('adverbs')

            closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0])
            text = context.text

            if closer is None:  # it's not a mirrored character, which means we
                                # just need to look for the next occurrence

                end_pos = text.find(opening_chars, match.start('delimiter') + n_chars)
            else:   # we need to look for the corresponding closing character,
                    # keep nesting in mind
                closing_chars = closer * n_chars
                nesting_level = 1

                search_pos = match.start('delimiter')

                # scan forward, bumping the level at each nested opener and
                # dropping it at each closer, until the level reaches zero
                while nesting_level > 0:
                    next_open_pos = text.find(opening_chars, search_pos + n_chars)
                    next_close_pos = text.find(closing_chars, search_pos + n_chars)

                    if next_close_pos == -1:
                        next_close_pos = len(text)
                        nesting_level = 0
                    elif next_open_pos != -1 and next_open_pos < next_close_pos:
                        nesting_level += 1
                        search_pos = next_open_pos
                    else:  # next_close_pos < next_open_pos
                        nesting_level -= 1
                        search_pos = next_close_pos

                end_pos = next_close_pos

            if end_pos < 0:     # if we didn't find a closer, just highlight the
                                # rest of the text in this class
                end_pos = len(text)

            # a ':to' adverb makes the construct a heredoc: the bracketed part
            # is the terminator string, so extend to the line that repeats it
            if adverbs is not None and re.search(r':to\b', adverbs):
                heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos]
                end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) +
                                        r'\s*$', text[end_pos:], re.MULTILINE)

                if end_heredoc:
                    end_pos += end_heredoc.end()
                else:
                    end_pos = len(text)

            yield match.start(), token_class, text[match.start():end_pos + n_chars]
            context.pos = end_pos + n_chars

        return callback
+
    def opening_brace_callback(lexer, match, context):
        """Emit an opening brace as Text and bump the brace-nesting counter
        when the lexer is directly below a 'token' state."""
        stack = context.stack

        yield match.start(), Text, context.text[match.start():match.end()]
        context.pos = match.end()

        # if we encounter an opening brace and we're one level
        # below a token state, it means we need to increment
        # the nesting level for braces so we know later when
        # we should return to the token rules.
        if len(stack) > 2 and stack[-2] == 'token':
            context.perl6_token_nesting_level += 1
+
    def closing_brace_callback(lexer, match, context):
        """Emit a closing brace as Text and, when directly below a 'token'
        state, decrement the brace-nesting counter — popping back to the
        token rules once it hits zero."""
        stack = context.stack

        yield match.start(), Text, context.text[match.start():match.end()]
        context.pos = match.end()

        # if we encounter a free closing brace and we're one level
        # below a token state, it means we need to check the nesting
        # level to see if we need to return to the token state.
        if len(stack) > 2 and stack[-2] == 'token':
            context.perl6_token_nesting_level -= 1
            if context.perl6_token_nesting_level == 0:
                stack.pop()
+
    def embedded_perl6_callback(lexer, match, context):
        """On '{' inside a regex token body, start tracking brace nesting
        and switch into the full Perl 6 'root' rules for the embedded
        code block."""
        context.perl6_token_nesting_level = 1
        yield match.start(), Text, context.text[match.start():match.end()]
        context.pos = match.end()
        context.stack.append('root')
+
    # If you're modifying these rules, be careful if you need to process '{' or '}'
    # characters. We have special logic for processing these characters (due to the fact
    # that you can nest Perl 6 code in regex blocks), so if you need to process one of
    # them, make sure you also process the corresponding one!
    tokens = {
        # rules shared by the code states ('root', 'pre-token')
        'common': [
            (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)',
             brackets_callback(Comment.Multiline)),
            (r'#[^\n]*$', Comment.Single),
            (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
            (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
            (r'^=.*?\n\s*?\n', Comment.Multiline),
            (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)',
             bygroups(Keyword, Name), 'token-sym-brackets'),
            (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + r')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?',
             bygroups(Keyword, Name), 'pre-token'),
            # deal with a special case in the Perl 6 grammar (role q { ... })
            (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Whitespace, Name, Whitespace)),
            (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword),
            (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'),
             Name.Builtin),
            (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin),
            # copied from PerlLexer
            (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*',
             Name.Variable),
            (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
            (r'::\?\w+', Name.Variable.Global),
            (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*',
             Name.Variable.Global),
            (r'\$(?:<.*?>)+', Name.Variable),
            (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])'
             r'(?P=first_char)*)', brackets_callback(String)),
            # copied from PerlLexer
            (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
            (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
            (r'0b[01]+(_[01]+)*', Number.Bin),
            (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
             Number.Float),
            (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
            (r'\d+(_\d+)*', Number.Integer),
            (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
            (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
            (r'm\w+(?=\()', Name),
            (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])'
             r'(?P=first_char)*)', brackets_callback(String.Regex)),
            (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/',
             String.Regex),
            (r'<[^\s=].*?\S>', String),
            (_build_word_match(PERL6_OPERATORS), Operator),
            (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
        ],
        'root': [
            include('common'),
            (r'\{', opening_brace_callback),
            (r'\}', closing_brace_callback),
            (r'.+?', Text),
        ],
        # between a regex/token/rule keyword and its opening brace
        'pre-token': [
            include('common'),
            (r'\{', Text, ('#pop', 'token')),
            (r'.+?', Text),
        ],
        'token-sym-brackets': [
            (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)',
             brackets_callback(Name), ('#pop', 'pre-token')),
            default(('#pop', 'pre-token')),
        ],
        # the body of a regex/token/rule definition
        'token': [
            (r'\}', Text, '#pop'),
            (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)),
            # make sure that quotes in character classes aren't treated as strings
            (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex),
            # make sure that '#' characters in quotes aren't treated as comments
            (r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex),
            (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex),
            (r'#.*?$', Comment.Single),
            (r'\{', embedded_perl6_callback),
            ('.+?', String.Regex),
        ],
    }
+
+ def analyse_text(text):
+ def strip_pod(lines):
+ in_pod = False
+ stripped_lines = []
+
+ for line in lines:
+ if re.match(r'^=(?:end|cut)', line):
+ in_pod = False
+ elif re.match(r'^=\w+', line):
+ in_pod = True
+ elif not in_pod:
+ stripped_lines.append(line)
+
+ return stripped_lines
+
+ # XXX handle block comments
+ lines = text.splitlines()
+ lines = strip_pod(lines)
+ text = '\n'.join(lines)
+
+ if shebang_matches(text, r'perl6|rakudo|niecza|pugs'):
+ return True
+
+ saw_perl_decl = False
+ rating = False
+
+ # check for my/our/has declarations
+ if re.search(r"(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE +
+ r"+\s+)?[$@%&(]", text):
+ rating = 0.8
+ saw_perl_decl = True
+
+ for line in lines:
+ line = re.sub('#.*', '', line)
+ if re.match(r'^\s*$', line):
+ continue
+
+ # match v6; use v6; use v6.0; use v6.0.0;
+ if re.match(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line):
+ return True
+ # match class, module, role, enum, grammar declarations
+ class_decl = re.match(r'^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line)
+ if class_decl:
+ if saw_perl_decl or class_decl.group('scope') is not None:
+ return True
+ rating = 0.05
+ continue
+ break
+
+ if ':=' in text:
+ # Same logic as above for PerlLexer
+ rating /= 2
+
+ return rating
+
    def __init__(self, **options):
        # Default the input encoding to UTF-8 unless the caller supplied
        # an explicit 'encoding' option.
        super().__init__(**options)
        self.encoding = options.get('encoding', 'utf-8')
diff --git a/pygments/lexers/phix.py b/pygments/lexers/phix.py
new file mode 100644
index 0000000..b292aa5
--- /dev/null
+++ b/pygments/lexers/phix.py
@@ -0,0 +1,364 @@
+"""
+ pygments.lexers.phix
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Phix.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Whitespace
+
+__all__ = ['PhixLexer']
+
+
class PhixLexer(RegexLexer):
    """
    Pygments Lexer for Phix files (.exw).
    See http://phix.x10.mx

    Tokenises comments (line, nestable block), strings (double-, single-
    and back-quoted), and classifies identifiers against the generated
    keyword/routine/constant tables below.

    .. versionadded:: 2.14.0
    """

    name = 'Phix'
    url = 'http://phix.x10.mx'
    aliases = ['phix']
    filenames = ['*.exw']
    mimetypes = ['text/x-phix']

    flags = re.MULTILINE  # nb: **NOT** re.DOTALL! (totally spanners comment handling)

    # Conditional-compilation directives.
    preproc = (
        'ifdef', 'elsifdef', 'elsedef'
    )
    # Note these lists are auto-generated by pwa/p2js.exw, when pwa\src\p2js_keywords.e (etc)
    # change, though of course subsequent copy/commit/pull requests are all manual steps.
    # NOTE(review): some entries look like typos ('cdCanvan', 'incl0de_file',
    # 's_leap_year') but are kept verbatim — confirm against the generator
    # before "fixing" them.
    types = (
        'string', 'nullable_string', 'atom_string', 'atom', 'bool', 'boolean',
        'cdCanvan', 'cdCanvas', 'complex', 'CURLcode', 'dictionary', 'int',
        'integer', 'Ihandle', 'Ihandles', 'Ihandln', 'mpfr', 'mpq', 'mpz',
        'mpz_or_string', 'number', 'rid_string', 'seq', 'sequence', 'timedate',
        'object'
    )
    keywords = (
        'abstract', 'class', 'continue', 'export', 'extends', 'nullable',
        'private', 'public', 'static', 'struct', 'trace',
        'and', 'break', 'by', 'case', 'catch', 'const', 'constant', 'debug',
        'default', 'do', 'else', 'elsif', 'end', 'enum', 'exit', 'fallthru',
        'fallthrough', 'for', 'forward', 'function', 'global', 'if', 'in',
        'include', 'js', 'javascript', 'javascript_semantics', 'let', 'not',
        'or', 'procedure', 'profile', 'profile_time', 'return', 'safe_mode',
        'switch', 'then', 'to', 'try', 'type', 'type_check', 'until', 'warning',
        'while', 'with', 'without', 'xor'
    )
    # Builtin and library routine names (stdlib, IUP/CD/GL bindings, gmp/mpfr
    # wrappers, etc.), as emitted by the generator.
    routines = (
        'abort', 'abs', 'adjust_timedate', 'and_bits', 'and_bitsu', 'apply',
        'append', 'arccos', 'arcsin', 'arctan', 'assert', 'atan2',
        'atom_to_float32', 'atom_to_float64', 'bankers_rounding', 'beep',
        'begins', 'binary_search', 'bits_to_int', 'bk_color', 'bytes_to_int',
        'call_func', 'call_proc', 'cdCanvasActivate', 'cdCanvasArc',
        'cdCanvasBegin', 'cdCanvasBox', 'cdCanvasChord', 'cdCanvasCircle',
        'cdCanvasClear', 'cdCanvasEnd', 'cdCanvasFlush', 'cdCanvasFont',
        'cdCanvasGetImageRGB', 'cdCanvasGetSize', 'cdCanvasGetTextAlignment',
        'cdCanvasGetTextSize', 'cdCanvasLine', 'cdCanvasMark',
        'cdCanvasMarkSize', 'cdCanvasMultiLineVectorText', 'cdCanvasPixel',
        'cdCanvasRect', 'cdCanvasRoundedBox', 'cdCanvasRoundedRect',
        'cdCanvasSector', 'cdCanvasSetAttribute', 'cdCanvasSetBackground',
        'cdCanvasSetFillMode', 'cdCanvasSetForeground',
        'cdCanvasSetInteriorStyle', 'cdCanvasSetLineStyle',
        'cdCanvasSetLineWidth', 'cdCanvasSetTextAlignment', 'cdCanvasText',
        'cdCanvasSetTextOrientation', 'cdCanvasGetTextOrientation',
        'cdCanvasVectorText', 'cdCanvasVectorTextDirection',
        'cdCanvasVectorTextSize', 'cdCanvasVertex', 'cdCreateCanvas',
        'cdDecodeAlpha', 'cdDecodeColor', 'cdDecodeColorAlpha', 'cdEncodeAlpha',
        'cdEncodeColor', 'cdEncodeColorAlpha', 'cdKillCanvas', 'cdVersion',
        'cdVersionDate', 'ceil', 'change_timezone', 'choose', 'clear_screen',
        'columnize', 'command_line', 'compare', 'complex_abs', 'complex_add',
        'complex_arg', 'complex_conjugate', 'complex_cos', 'complex_cosh',
        'complex_div', 'complex_exp', 'complex_imag', 'complex_inv',
        'complex_log', 'complex_mul', 'complex_neg', 'complex_new',
        'complex_norm', 'complex_power', 'complex_rho', 'complex_real',
        'complex_round', 'complex_sin', 'complex_sinh', 'complex_sprint',
        'complex_sqrt', 'complex_sub', 'complex_theta', 'concat', 'cos',
        'crash', 'custom_sort', 'date', 'day_of_week', 'day_of_year',
        'days_in_month', 'decode_base64', 'decode_flags', 'deep_copy', 'deld',
        'deserialize', 'destroy_dict', 'destroy_queue', 'destroy_stack',
        'dict_name', 'dict_size', 'elapsed', 'elapsed_short', 'encode_base64',
        'equal', 'even', 'exp', 'extract', 'factorial', 'factors',
        'file_size_k', 'find', 'find_all', 'find_any', 'find_replace', 'filter',
        'flatten', 'float32_to_atom', 'float64_to_atom', 'floor',
        'format_timedate', 'free_console', 'from_polar', 'gcd', 'get_file_base',
        'get_file_extension', 'get_file_name', 'get_file_name_and_path',
        'get_file_path', 'get_file_path_and_name', 'get_maxprime', 'get_prime',
        'get_primes', 'get_primes_le', 'get_proper_dir', 'get_proper_path',
        'get_rand', 'get_routine_info', 'get_test_abort', 'get_test_logfile',
        'get_test_pause', 'get_test_verbosity', 'get_tzid', 'getd', 'getdd',
        'getd_all_keys', 'getd_by_index', 'getd_index', 'getd_partial_key',
        'glAttachShader', 'glBindBuffer', 'glBindTexture', 'glBufferData',
        'glCanvasSpecialText', 'glClear', 'glClearColor', 'glColor',
        'glCompileShader', 'glCreateBuffer', 'glCreateProgram',
        'glCreateShader', 'glCreateTexture', 'glDeleteProgram',
        'glDeleteShader', 'glDrawArrays', 'glEnable',
        'glEnableVertexAttribArray', 'glFloat32Array', 'glInt32Array',
        'glFlush', 'glGetAttribLocation', 'glGetError', 'glGetProgramInfoLog',
        'glGetProgramParameter', 'glGetShaderInfoLog', 'glGetShaderParameter',
        'glGetUniformLocation', 'glLinkProgram', 'glLoadIdentity',
        'glMatrixMode', 'glOrtho', 'glRotatef', 'glShadeModel',
        'glShaderSource', 'glSimpleA7texcoords', 'glTexImage2Dc',
        'glTexParameteri', 'glTranslate', 'glUniform1f', 'glUniform1i',
        'glUniformMatrix4fv', 'glUseProgram', 'glVertex',
        'glVertexAttribPointer', 'glViewport', 'head', 'hsv_to_rgb', 'iff',
        'iif', 'include_file', 'incl0de_file', 'insert', 'instance',
        'int_to_bits', 'int_to_bytes', 'is_dict', 'is_integer', 's_leap_year',
        'is_prime', 'is_prime2', 'islower', 'isupper', 'Icallback',
        'iup_isdouble', 'iup_isprint', 'iup_XkeyBase', 'IupAppend', 'IupAlarm',
        'IupBackgroundBox', 'IupButton', 'IupCalendar', 'IupCanvas',
        'IupClipboard', 'IupClose', 'IupCloseOnEscape', 'IupControlsOpen',
        'IupDatePick', 'IupDestroy', 'IupDialog', 'IupDrawArc', 'IupDrawBegin',
        'IupDrawEnd', 'IupDrawGetSize', 'IupDrawGetTextSize', 'IupDrawLine',
        'IupDrawRectangle', 'IupDrawText', 'IupExpander', 'IupFill',
        'IupFlatLabel', 'IupFlatList', 'IupFlatTree', 'IupFlush', 'IupFrame',
        'IupGetAttribute', 'IupGetAttributeId', 'IupGetAttributePtr',
        'IupGetBrother', 'IupGetChild', 'IupGetChildCount', 'IupGetClassName',
        'IupGetDialog', 'IupGetDialogChild', 'IupGetDouble', 'IupGetFocus',
        'IupGetGlobal', 'IupGetGlobalInt', 'IupGetGlobalIntInt', 'IupGetInt',
        'IupGetInt2', 'IupGetIntId', 'IupGetIntInt', 'IupGetParent',
        'IupGLCanvas', 'IupGLCanvasOpen', 'IupGLMakeCurrent', 'IupGraph',
        'IupHbox', 'IupHide', 'IupImage', 'IupImageRGBA', 'IupItem',
        'iupKeyCodeToName', 'IupLabel', 'IupLink', 'IupList', 'IupMap',
        'IupMenu', 'IupMenuItem', 'IupMessage', 'IupMessageDlg', 'IupMultiBox',
        'IupMultiLine', 'IupNextField', 'IupNormaliser', 'IupOpen',
        'IupPlayInput', 'IupPopup', 'IupPreviousField', 'IupProgressBar',
        'IupRadio', 'IupRecordInput', 'IupRedraw', 'IupRefresh',
        'IupRefreshChildren', 'IupSeparator', 'IupSetAttribute',
        'IupSetAttributes', 'IupSetAttributeHandle', 'IupSetAttributeId',
        'IupSetAttributePtr', 'IupSetCallback', 'IupSetCallbacks',
        'IupSetDouble', 'IupSetFocus', 'IupSetGlobal', 'IupSetGlobalInt',
        'IupSetGlobalFunction', 'IupSetHandle', 'IupSetInt',
        'IupSetStrAttribute', 'IupSetStrGlobal', 'IupShow', 'IupShowXY',
        'IupSplit', 'IupStoreAttribute', 'IupSubmenu', 'IupTable',
        'IupTableClearSelected', 'IupTableClick_cb', 'IupTableGetSelected',
        'IupTableResize_cb', 'IupTableSetData', 'IupTabs', 'IupText',
        'IupTimer', 'IupToggle', 'IupTreeAddNodes', 'IupTreeView', 'IupUpdate',
        'IupValuator', 'IupVbox', 'join', 'join_by', 'join_path', 'k_perm',
        'largest', 'lcm', 'length', 'log', 'log10', 'log2', 'lower',
        'm4_crossProduct', 'm4_inverse', 'm4_lookAt', 'm4_multiply',
        'm4_normalize', 'm4_perspective', 'm4_subtractVectors', 'm4_xRotate',
        'm4_yRotate', 'machine_bits', 'machine_word', 'match', 'match_all',
        'match_replace', 'max', 'maxsq', 'min', 'minsq', 'mod', 'mpfr_add',
        'mpfr_ceil', 'mpfr_cmp', 'mpfr_cmp_si', 'mpfr_const_pi', 'mpfr_div',
        'mpfr_div_si', 'mpfr_div_z', 'mpfr_floor', 'mpfr_free', 'mpfr_get_d',
        'mpfr_get_default_precision', 'mpfr_get_default_rounding_mode',
        'mpfr_get_fixed', 'mpfr_get_precision', 'mpfr_get_si', 'mpfr_init',
        'mpfr_inits', 'mpfr_init_set', 'mpfr_init_set_q', 'mpfr_init_set_z',
        'mpfr_mul', 'mpfr_mul_si', 'mpfr_pow_si', 'mpfr_set', 'mpfr_set_d',
        'mpfr_set_default_precision', 'mpfr_set_default_rounding_mode',
        'mpfr_set_precision', 'mpfr_set_q', 'mpfr_set_si', 'mpfr_set_str',
        'mpfr_set_z', 'mpfr_si_div', 'mpfr_si_sub', 'mpfr_sqrt', 'mpfr_sub',
        'mpfr_sub_si', 'mpq_abs', 'mpq_add', 'mpq_add_si', 'mpq_canonicalize',
        'mpq_cmp', 'mpq_cmp_si', 'mpq_div', 'mpq_div_2exp', 'mpq_free',
        'mpq_get_den', 'mpq_get_num', 'mpq_get_str', 'mpq_init', 'mpq_init_set',
        'mpq_init_set_si', 'mpq_init_set_str', 'mpq_init_set_z', 'mpq_inits',
        'mpq_inv', 'mpq_mul', 'mpq_neg', 'mpq_set', 'mpq_set_si', 'mpq_set_str',
        'mpq_set_z', 'mpq_sub', 'mpz_abs', 'mpz_add', 'mpz_addmul',
        'mpz_addmul_ui', 'mpz_addmul_si', 'mpz_add_si', 'mpz_add_ui', 'mpz_and',
        'mpz_bin_uiui', 'mpz_cdiv_q', 'mpz_cmp', 'mpz_cmp_si', 'mpz_divexact',
        'mpz_divexact_ui', 'mpz_divisible_p', 'mpz_divisible_ui_p', 'mpz_even',
        'mpz_fac_ui', 'mpz_factorstring', 'mpz_fdiv_q', 'mpz_fdiv_q_2exp',
        'mpz_fdiv_q_ui', 'mpz_fdiv_qr', 'mpz_fdiv_r', 'mpz_fdiv_ui',
        'mpz_fib_ui', 'mpz_fib2_ui', 'mpz_fits_atom', 'mpz_fits_integer',
        'mpz_free', 'mpz_gcd', 'mpz_gcd_ui', 'mpz_get_atom', 'mpz_get_integer',
        'mpz_get_short_str', 'mpz_get_str', 'mpz_init', 'mpz_init_set',
        'mpz_inits', 'mpz_invert', 'mpz_lcm', 'mpz_lcm_ui', 'mpz_max',
        'mpz_min', 'mpz_mod', 'mpz_mod_ui', 'mpz_mul', 'mpz_mul_2exp',
        'mpz_mul_d', 'mpz_mul_si', 'mpz_neg', 'mpz_nthroot', 'mpz_odd',
        'mpz_pollard_rho', 'mpz_pow_ui', 'mpz_powm', 'mpz_powm_ui', 'mpz_prime',
        'mpz_prime_factors', 'mpz_prime_mr', 'mpz_rand', 'mpz_rand_ui',
        'mpz_re_compose', 'mpz_remove', 'mpz_scan0', 'mpz_scan1', 'mpz_set',
        'mpz_set_d', 'mpz_set_si', 'mpz_set_str', 'mpz_set_v', 'mpz_sign',
        'mpz_sizeinbase', 'mpz_sqrt', 'mpz_sub', 'mpz_sub_si', 'mpz_sub_ui',
        'mpz_si_sub', 'mpz_tdiv_q_2exp', 'mpz_tdiv_r_2exp', 'mpz_tstbit',
        'mpz_ui_pow_ui', 'mpz_xor', 'named_dict', 'new_dict', 'new_queue',
        'new_stack', 'not_bits', 'not_bitsu', 'odd', 'or_all', 'or_allu',
        'or_bits', 'or_bitsu', 'ord', 'ordinal', 'ordinant',
        'override_timezone', 'pad', 'pad_head', 'pad_tail', 'parse_date_string',
        'papply', 'peep', 'peepn', 'peep_dict', 'permute', 'permutes',
        'platform', 'pop', 'popn', 'pop_dict', 'power', 'pp', 'ppEx', 'ppExf',
        'ppf', 'ppOpt', 'pq_add', 'pq_destroy', 'pq_empty', 'pq_new', 'pq_peek',
        'pq_pop', 'pq_pop_data', 'pq_size', 'prepend', 'prime_factors',
        'printf', 'product', 'proper', 'push', 'pushn', 'putd', 'puts',
        'queue_empty', 'queue_size', 'rand', 'rand_range', 'reinstate',
        'remainder', 'remove', 'remove_all', 'repeat', 'repeatch', 'replace',
        'requires', 'reverse', 'rfind', 'rgb', 'rmatch', 'rmdr', 'rnd', 'round',
        'routine_id', 'scanf', 'serialize', 'series', 'set_rand',
        'set_test_abort', 'set_test_logfile', 'set_test_module',
        'set_test_pause', 'set_test_verbosity', 'set_timedate_formats',
        'set_timezone', 'setd', 'setd_default', 'shorten', 'sha256',
        'shift_bits', 'shuffle', 'sign', 'sin', 'smallest', 'sort',
        'sort_columns', 'speak', 'splice', 'split', 'split_any', 'split_by',
        'sprint', 'sprintf', 'sq_abs', 'sq_add', 'sq_and', 'sq_and_bits',
        'sq_arccos', 'sq_arcsin', 'sq_arctan', 'sq_atom', 'sq_ceil', 'sq_cmp',
        'sq_cos', 'sq_div', 'sq_even', 'sq_eq', 'sq_floor', 'sq_floor_div',
        'sq_ge', 'sq_gt', 'sq_int', 'sq_le', 'sq_log', 'sq_log10', 'sq_log2',
        'sq_lt', 'sq_max', 'sq_min', 'sq_mod', 'sq_mul', 'sq_ne', 'sq_not',
        'sq_not_bits', 'sq_odd', 'sq_or', 'sq_or_bits', 'sq_power', 'sq_rand',
        'sq_remainder', 'sq_rmdr', 'sq_rnd', 'sq_round', 'sq_seq', 'sq_sign',
        'sq_sin', 'sq_sqrt', 'sq_str', 'sq_sub', 'sq_tan', 'sq_trunc',
        'sq_uminus', 'sq_xor', 'sq_xor_bits', 'sqrt', 'square_free',
        'stack_empty', 'stack_size', 'substitute', 'substitute_all', 'sum',
        'tail', 'tan', 'test_equal', 'test_fail', 'test_false',
        'test_not_equal', 'test_pass', 'test_summary', 'test_true',
        'text_color', 'throw', 'time', 'timedate_diff', 'timedelta',
        'to_integer', 'to_number', 'to_rgb', 'to_string', 'traverse_dict',
        'traverse_dict_partial_key', 'trim', 'trim_head', 'trim_tail', 'trunc',
        'tagset', 'tagstart', 'typeof', 'unique', 'unix_dict', 'upper',
        'utf8_to_utf32', 'utf32_to_utf8', 'version', 'vlookup', 'vslice',
        'wglGetProcAddress', 'wildcard_file', 'wildcard_match', 'with_rho',
        'with_theta', 'xml_new_doc', 'xml_new_element', 'xml_set_attribute',
        'xml_sprint', 'xor_bits', 'xor_bitsu',
        'accept', 'allocate', 'allocate_string', 'allow_break', 'ARM',
        'atom_to_float80', 'c_func', 'c_proc', 'call_back', 'chdir',
        'check_break', 'clearDib', 'close', 'closesocket', 'console',
        'copy_file', 'create', 'create_directory', 'create_thread',
        'curl_easy_cleanup', 'curl_easy_get_file', 'curl_easy_init',
        'curl_easy_perform', 'curl_easy_perform_ex', 'curl_easy_setopt',
        'curl_easy_strerror', 'curl_global_cleanup', 'curl_global_init',
        'curl_slist_append', 'curl_slist_free_all', 'current_dir', 'cursor',
        'define_c_func', 'define_c_proc', 'delete', 'delete_cs', 'delete_file',
        'dir', 'DLL', 'drawDib', 'drawShadedPolygonToDib', 'ELF32', 'ELF64',
        'enter_cs', 'eval', 'exit_thread', 'free', 'file_exists', 'final',
        'float80_to_atom', 'format', 'get_bytes', 'get_file_date',
        'get_file_size', 'get_file_type', 'get_interpreter', 'get_key',
        'get_socket_error', 'get_text', 'get_thread_exitcode', 'get_thread_id',
        'getc', 'getenv', 'gets', 'getsockaddr', 'glBegin', 'glCallList',
        'glFrustum', 'glGenLists', 'glGetString', 'glLight', 'glMaterial',
        'glNewList', 'glNormal', 'glPopMatrix', 'glPushMatrix', 'glRotate',
        'glEnd', 'glEndList', 'glTexImage2D', 'goto', 'GUI', 'icons', 'ilASM',
        'include_files', 'include_paths', 'init_cs', 'ip_to_string',
        'IupConfig', 'IupConfigDialogClosed', 'IupConfigDialogShow',
        'IupConfigGetVariableInt', 'IupConfigLoad', 'IupConfigSave',
        'IupConfigSetVariableInt', 'IupExitLoop', 'IupFileDlg', 'IupFileList',
        'IupGLSwapBuffers', 'IupHelp', 'IupLoopStep', 'IupMainLoop',
        'IupNormalizer', 'IupPlot', 'IupPlotAdd', 'IupPlotBegin', 'IupPlotEnd',
        'IupPlotInsert', 'IupSaveImage', 'IupTreeGetUserId', 'IupUser',
        'IupVersion', 'IupVersionDate', 'IupVersionNumber', 'IupVersionShow',
        'killDib', 'leave_cs', 'listen', 'manifest', 'mem_copy', 'mem_set',
        'mpfr_gamma', 'mpfr_printf', 'mpfr_sprintf', 'mpz_export', 'mpz_import',
        'namespace', 'new', 'newDib', 'open', 'open_dll', 'PE32', 'PE64',
        'peek', 'peek_string', 'peek1s', 'peek1u', 'peek2s', 'peek2u', 'peek4s',
        'peek4u', 'peek8s', 'peek8u', 'peekNS', 'peekns', 'peeknu', 'poke',
        'poke2', 'poke4', 'poke8', 'pokeN', 'poke_string', 'poke_wstring',
        'position', 'progress', 'prompt_number', 'prompt_string', 'read_file',
        'read_lines', 'recv', 'resume_thread', 'seek', 'select', 'send',
        'setHandler', 'shutdown', 'sleep', 'SO', 'sockaddr_in', 'socket',
        'split_path', 'suspend_thread', 'system', 'system_exec', 'system_open',
        'system_wait', 'task_clock_start', 'task_clock_stop', 'task_create',
        'task_delay', 'task_list', 'task_schedule', 'task_self', 'task_status',
        'task_suspend', 'task_yield', 'thread_safe_string', 'try_cs',
        'utf8_to_utf16', 'utf16_to_utf8', 'utf16_to_utf32', 'utf32_to_utf16',
        'video_config', 'WSACleanup', 'wait_thread', 'walk_dir', 'where',
        'write_lines', 'wait_key'
    )
    # Predefined constant names (colours, IUP/CD/GL enums, curl options, ...).
    constants = (
        'ANY_QUEUE', 'ASCENDING', 'BLACK', 'BLOCK_CURSOR', 'BLUE',
        'BRIGHT_CYAN', 'BRIGHT_BLUE', 'BRIGHT_GREEN', 'BRIGHT_MAGENTA',
        'BRIGHT_RED', 'BRIGHT_WHITE', 'BROWN', 'C_DWORD', 'C_INT', 'C_POINTER',
        'C_USHORT', 'C_WORD', 'CD_AMBER', 'CD_BLACK', 'CD_BLUE', 'CD_BOLD',
        'CD_BOLD_ITALIC', 'CD_BOX', 'CD_CENTER', 'CD_CIRCLE', 'CD_CLOSED_LINES',
        'CD_CONTINUOUS', 'CD_CUSTOM', 'CD_CYAN', 'CD_DARK_BLUE', 'CD_DARK_CYAN',
        'CD_DARK_GRAY', 'CD_DARK_GREY', 'CD_DARK_GREEN', 'CD_DARK_MAGENTA',
        'CD_DARK_RED', 'CD_DARK_YELLOW', 'CD_DASH_DOT', 'CD_DASH_DOT_DOT',
        'CD_DASHED', 'CD_DBUFFER', 'CD_DEG2RAD', 'CD_DIAMOND', 'CD_DOTTED',
        'CD_EAST', 'CD_EVENODD', 'CD_FILL', 'CD_GL', 'CD_GRAY', 'CD_GREY',
        'CD_GREEN', 'CD_HATCH', 'CD_HOLLOW', 'CD_HOLLOW_BOX',
        'CD_HOLLOW_CIRCLE', 'CD_HOLLOW_DIAMOND', 'CD_INDIGO', 'CD_ITALIC',
        'CD_IUP', 'CD_IUPDBUFFER', 'CD_LIGHT_BLUE', 'CD_LIGHT_GRAY',
        'CD_LIGHT_GREY', 'CD_LIGHT_GREEN', 'CD_LIGHT_PARCHMENT', 'CD_MAGENTA',
        'CD_NAVY', 'CD_NORTH', 'CD_NORTH_EAST', 'CD_NORTH_WEST', 'CD_OLIVE',
        'CD_OPEN_LINES', 'CD_ORANGE', 'CD_PARCHMENT', 'CD_PATTERN',
        'CD_PRINTER', 'CD_PURPLE', 'CD_PLAIN', 'CD_PLUS', 'CD_QUERY',
        'CD_RAD2DEG', 'CD_RED', 'CD_SILVER', 'CD_SOLID', 'CD_SOUTH_EAST',
        'CD_SOUTH_WEST', 'CD_STAR', 'CD_STIPPLE', 'CD_STRIKEOUT',
        'CD_UNDERLINE', 'CD_WEST', 'CD_WHITE', 'CD_WINDING', 'CD_VIOLET',
        'CD_X', 'CD_YELLOW', 'CURLE_OK', 'CURLOPT_MAIL_FROM',
        'CURLOPT_MAIL_RCPT', 'CURLOPT_PASSWORD', 'CURLOPT_READDATA',
        'CURLOPT_READFUNCTION', 'CURLOPT_SSL_VERIFYPEER',
        'CURLOPT_SSL_VERIFYHOST', 'CURLOPT_UPLOAD', 'CURLOPT_URL',
        'CURLOPT_USE_SSL', 'CURLOPT_USERNAME', 'CURLOPT_VERBOSE',
        'CURLOPT_WRITEFUNCTION', 'CURLUSESSL_ALL', 'CYAN', 'D_NAME',
        'D_ATTRIBUTES', 'D_SIZE', 'D_YEAR', 'D_MONTH', 'D_DAY', 'D_HOUR',
        'D_MINUTE', 'D_SECOND', 'D_CREATION', 'D_LASTACCESS', 'D_MODIFICATION',
        'DT_YEAR', 'DT_MONTH', 'DT_DAY', 'DT_HOUR', 'DT_MINUTE', 'DT_SECOND',
        'DT_DOW', 'DT_MSEC', 'DT_DOY', 'DT_GMT', 'EULER', 'E_CODE', 'E_ADDR',
        'E_LINE', 'E_RTN', 'E_NAME', 'E_FILE', 'E_PATH', 'E_USER', 'false',
        'False', 'FALSE', 'FIFO_QUEUE', 'FILETYPE_DIRECTORY', 'FILETYPE_FILE',
        'GET_EOF', 'GET_FAIL', 'GET_IGNORE', 'GET_SUCCESS',
        'GL_AMBIENT_AND_DIFFUSE', 'GL_ARRAY_BUFFER', 'GL_CLAMP',
        'GL_CLAMP_TO_BORDER', 'GL_CLAMP_TO_EDGE', 'GL_COLOR_BUFFER_BIT',
        'GL_COMPILE', 'GL_COMPILE_STATUS', 'GL_CULL_FACE',
        'GL_DEPTH_BUFFER_BIT', 'GL_DEPTH_TEST', 'GL_EXTENSIONS', 'GL_FLAT',
        'GL_FLOAT', 'GL_FRAGMENT_SHADER', 'GL_FRONT', 'GL_LIGHT0',
        'GL_LIGHTING', 'GL_LINEAR', 'GL_LINK_STATUS', 'GL_MODELVIEW',
        'GL_NEAREST', 'GL_NO_ERROR', 'GL_NORMALIZE', 'GL_POSITION',
        'GL_PROJECTION', 'GL_QUAD_STRIP', 'GL_QUADS', 'GL_RENDERER',
        'GL_REPEAT', 'GL_RGB', 'GL_RGBA', 'GL_SMOOTH', 'GL_STATIC_DRAW',
        'GL_TEXTURE_2D', 'GL_TEXTURE_MAG_FILTER', 'GL_TEXTURE_MIN_FILTER',
        'GL_TEXTURE_WRAP_S', 'GL_TEXTURE_WRAP_T', 'GL_TRIANGLES',
        'GL_UNSIGNED_BYTE', 'GL_VENDOR', 'GL_VERSION', 'GL_VERTEX_SHADER',
        'GRAY', 'GREEN', 'GT_LF_STRIPPED', 'GT_WHOLE_FILE', 'INVLN10',
        'IUP_CLOSE', 'IUP_CONTINUE', 'IUP_DEFAULT', 'IUP_BLACK', 'IUP_BLUE',
        'IUP_BUTTON1', 'IUP_BUTTON3', 'IUP_CENTER', 'IUP_CYAN', 'IUP_DARK_BLUE',
        'IUP_DARK_CYAN', 'IUP_DARK_GRAY', 'IUP_DARK_GREY', 'IUP_DARK_GREEN',
        'IUP_DARK_MAGENTA', 'IUP_DARK_RED', 'IUP_GRAY', 'IUP_GREY', 'IUP_GREEN',
        'IUP_IGNORE', 'IUP_INDIGO', 'IUP_MAGENTA', 'IUP_MASK_INT',
        'IUP_MASK_UINT', 'IUP_MOUSEPOS', 'IUP_NAVY', 'IUP_OLIVE', 'IUP_RECTEXT',
        'IUP_RED', 'IUP_LIGHT_BLUE', 'IUP_LIGHT_GRAY', 'IUP_LIGHT_GREY',
        'IUP_LIGHT_GREEN', 'IUP_ORANGE', 'IUP_PARCHMENT', 'IUP_PURPLE',
        'IUP_SILVER', 'IUP_TEAL', 'IUP_VIOLET', 'IUP_WHITE', 'IUP_YELLOW',
        'K_BS', 'K_cA', 'K_cC', 'K_cD', 'K_cF5', 'K_cK', 'K_cM', 'K_cN', 'K_cO',
        'K_cP', 'K_cR', 'K_cS', 'K_cT', 'K_cW', 'K_CR', 'K_DEL', 'K_DOWN',
        'K_END', 'K_ESC', 'K_F1', 'K_F2', 'K_F3', 'K_F4', 'K_F5', 'K_F6',
        'K_F7', 'K_F8', 'K_F9', 'K_F10', 'K_F11', 'K_F12', 'K_HOME', 'K_INS',
        'K_LEFT', 'K_MIDDLE', 'K_PGDN', 'K_PGUP', 'K_RIGHT', 'K_SP', 'K_TAB',
        'K_UP', 'K_h', 'K_i', 'K_j', 'K_p', 'K_r', 'K_s', 'JS', 'LIFO_QUEUE',
        'LINUX', 'MAX_HEAP', 'MAGENTA', 'MIN_HEAP', 'Nan', 'NO_CURSOR', 'null',
        'NULL', 'PI', 'pp_Ascii', 'pp_Brkt', 'pp_Date', 'pp_File', 'pp_FltFmt',
        'pp_Indent', 'pp_IntCh', 'pp_IntFmt', 'pp_Maxlen', 'pp_Nest',
        'pp_Pause', 'pp_Q22', 'pp_StrFmt', 'RED', 'SEEK_OK', 'SLASH',
        'TEST_ABORT', 'TEST_CRASH', 'TEST_PAUSE', 'TEST_PAUSE_FAIL',
        'TEST_QUIET', 'TEST_SHOW_ALL', 'TEST_SHOW_FAILED', 'TEST_SUMMARY',
        'true', 'True', 'TRUE', 'VC_SCRNLINES', 'WHITE', 'WINDOWS', 'YELLOW'
    )

    tokens = {
        'root': [
            (r"\s+", Whitespace),
            # Block comments: /* */, --/* */ and #[ ]; these nest, hence
            # the dedicated 'comment' state with #push/#pop.
            (r'/\*|--/\*|#\[', Comment.Multiline, 'comment'),
            (r'(?://|--|#!).*$', Comment.Single),
            # Alt:
            # (r'//.*$|--.*$|#!.*$', Comment.Single),
            (r'"([^"\\]|\\.)*"', String.Other),
            (r'\'[^\']*\'', String.Other),
            (r'`[^`]*`', String.Other),

            # NOTE(review): types share Name.Function with routines here —
            # confirm that is intentional (vs. e.g. Keyword.Type).
            (words(types, prefix=r'\b', suffix=r'\b'), Name.Function),
            (words(routines, prefix=r'\b', suffix=r'\b'), Name.Function),
            (words(preproc, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
            (words(constants, prefix=r'\b', suffix=r'\b'), Name.Constant),
            # Aside: Phix only supports/uses the ascii/non-unicode tilde
            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|\.(){},?:\[\]$\\;#]', Operator),
            (r'[\w-]+', Text)
        ],
        'comment': [
            # Anything that cannot open or close a block comment.
            (r'[^*/#]+', Comment.Multiline),
            (r'/\*|#\[', Comment.Multiline, '#push'),
            (r'\*/|#\]', Comment.Multiline, '#pop'),
            (r'[*/#]', Comment.Multiline)
        ]
    }
diff --git a/pygments/lexers/php.py b/pygments/lexers/php.py
new file mode 100644
index 0000000..61d552a
--- /dev/null
+++ b/pygments/lexers/php.py
@@ -0,0 +1,319 @@
+"""
+ pygments.lexers.php
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for PHP and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, default, \
+ using, this, words, do_insertions, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Other, Generic
+from pygments.util import get_bool_opt, get_list_opt, shebang_matches
+
+__all__ = ['ZephirLexer', 'PsyshConsoleLexer', 'PhpLexer']
+
+
class ZephirLexer(RegexLexer):
    """
    For Zephir language source code.

    Zephir is a compiled high level language aimed
    to the creation of C-extensions for PHP.

    .. versionadded:: 2.0
    """

    name = 'Zephir'
    url = 'http://zephir-lang.com/'
    aliases = ['zephir']
    filenames = ['*.zep']

    # NOTE(review): these two lists are not referenced by the token rules
    # below — possibly vestigial; confirm before removing.
    zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
    zephir_type = ['bit', 'bits', 'string']

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered at positions where a '/' would start a regex literal
        # rather than a division operator (JavaScript-style heuristic).
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'/', Operator, '#pop'),
            default('#pop')
        ],
        # NOTE(review): declared but never entered by any rule — confirm.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Operators; most may be followed by a regex literal.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
             r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
             r'empty)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
             r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
             r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
             r'transient|volatile|readonly)\b', Keyword.Reserved),
            (r'(true|false|null|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
             r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][\w\\]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]
    }
+
+
class PsyshConsoleLexer(Lexer):
    """
    Lexer for PsySH console sessions, e.g.:

    .. sourcecode:: psysh

        >>> $greeting = function($name): string {
        ... return "Hello, {$name}";
        ... };
        => Closure($name): string {#2371 …3}
        >>> $greeting('World')
        => "Hello, World"

    Prompt lines are delegated to `PhpLexer`; everything else is emitted
    as generic output.

    .. versionadded:: 2.7
    """
    name = 'PsySH console session for PHP'
    url = 'https://psysh.org/'
    aliases = ['psysh']

    def __init__(self, **options):
        # Code typed at the prompt never carries a leading '<?php'.
        options['startinline'] = True
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        php = PhpLexer(**self.options)
        code = ''
        prompts = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(('>>> ', '... ')):
                # Accumulate the PHP source; remember where each prompt
                # token must be re-inserted into the lexed stream.
                prompts.append((len(code), [(0, Generic.Prompt, line[:4])]))
                code += line[4:]
            elif line.rstrip() == '...':
                # Bare continuation prompt with no trailing space.
                prompts.append((len(code), [(0, Generic.Prompt, '...')]))
                code += line[3:]
            else:
                # Output line: flush any buffered PHP code first.
                if code:
                    yield from do_insertions(
                        prompts, php.get_tokens_unprocessed(code))
                    code = ''
                    prompts = []
                yield match.start(), Generic.Output, line
        if code:
            yield from do_insertions(
                prompts, php.get_tokens_unprocessed(code))
+
+
class PhpLexer(RegexLexer):
    """
    For PHP source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_php_builtins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._php_builtins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    url = 'https://www.php.net/'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]', '*.inc']
    mimetypes = ['text/x-php']

    # Note that a backslash is included, PHP uses a backslash as a namespace
    # separator.
    _ident_inner = r'(?:[\\_a-z]|[^\x00-\x7f])(?:[\\\w]|[^\x00-\x7f])*'
    # But not inside strings.
    _ident_nons = r'(?:[_a-z]|[^\x00-\x7f])(?:\w|[^\x00-\x7f])*'

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # Outside '<?php ... ?>' everything is passed through as Other.
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # Heredoc/nowdoc: <<<LABEL ... LABEL; (delimiter optionally
            # quoted; \2 and \3 backreference the quote and the label).
            (r'(<<<)([\'"]?)(' + _ident_nons + r')(\2\n.*?\n\s*)(\3)(;?)(\n)',
             bygroups(String, String, String.Delimiter, String, String.Delimiter,
                      Punctuation, Text)),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)(' + _ident_nons + ')',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/@-]+', Operator),
            (r'\?', Operator),  # don't add to the charclass above!
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # anonymous function: 'function (' with no name
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)(' + _ident_inner + ')',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|'
             r'endif|list|endswitch|new|endwhile|not|'
             r'array|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait|yield|'
             r'finally|match)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            include('magicconstants'),
            (r'\$\{\$+' + _ident_inner + r'\}', Name.Variable),
            (r'\$+' + _ident_inner, Name.Variable),
            # Plain identifiers; re-tagged as Name.Builtin in
            # get_tokens_unprocessed() when funcnamehighlighting is on.
            (_ident_inner, Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
            (r'\d+e[+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0x[a-f0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'0b[01]+', Number.Bin),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'magicfuncs': [
            # source: http://php.net/manual/en/language.oop5.magic.php
            (words((
                '__construct', '__destruct', '__call', '__callStatic', '__get', '__set',
                '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke',
                '__set_state', '__clone', '__debugInfo',), suffix=r'\b'),
             Name.Function.Magic),
        ],
        'magicconstants': [
            # source: http://php.net/manual/en/language.constants.predefined.php
            (words((
                '__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__',
                '__TRAIT__', '__METHOD__', '__NAMESPACE__',),
                suffix=r'\b'),
             Name.Constant),
        ],
        'classname': [
            (_ident_inner, Name.Class, '#pop')
        ],
        'functionname': [
            include('magicfuncs'),
            (_ident_inner, Name.Function, '#pop'),
            default('#pop')
        ],
        # Double-quoted strings with variable interpolation.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
            (r'\$' + _ident_nons + r'(\[\S+?\]|->' + _ident_nons + ')?',
             String.Interpol),
            # Complex interpolation: {${expr}} and {$expr} are re-lexed
            # by this same lexer in inline mode.
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]', String.Double)
        ],
    }

    def __init__(self, **options):
        # Consume lexer-specific options before handing the rest to
        # RegexLexer.
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._php_builtins import MODULES
            for key, value in MODULES.items():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # When startinline is set, begin directly in the 'php' state so
        # no opening '<?php' tag is required.
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                # Promote known builtin function names.
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    # nb: no 'self' by design — pygments wraps analyse_text functions.
    def analyse_text(text):
        if shebang_matches(text, r'php'):
            return True
        rv = 0.0
        # '<?' but not the '<?xml' declaration
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        return rv
diff --git a/pygments/lexers/pointless.py b/pygments/lexers/pointless.py
new file mode 100644
index 0000000..9fea7ea
--- /dev/null
+++ b/pygments/lexers/pointless.py
@@ -0,0 +1,71 @@
+"""
+ pygments.lexers.pointless
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Pointless.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text
+
+__all__ = ['PointlessLexer']
+
+
class PointlessLexer(RegexLexer):
    """
    For Pointless source code.

    .. versionadded:: 2.7
    """

    name = 'Pointless'
    url = 'https://ptls.dev'
    aliases = ['pointless']
    filenames = ['*.ptls']

    # Operator lexemes; `words` escapes each one and combines them into a
    # single regular expression.
    ops = words([
        "+", "-", "*", "/", "**", "%", "+=", "-=", "*=",
        "/=", "**=", "%=", "|>", "=", "==", "!=", "<", ">",
        "<=", ">=", "=>", "$", "++",
    ])

    # Reserved words, matched only at a word boundary (suffix=r'\b').
    keywords = words([
        "if", "then", "else", "where", "with", "cond",
        "case", "and", "or", "not", "in", "as", "for",
        "requires", "throw", "try", "catch", "when",
        "yield", "upval",
    ], suffix=r'\b')

    tokens = {
        'root': [
            (r'[ \n\r]+', Text),                      # whitespace
            (r'--.*$', Comment.Single),               # line comment
            (r'"""', String, 'multiString'),          # multi-line string
            (r'"', String, 'string'),                 # single-line string
            (r'[\[\](){}:;,.]', Punctuation),
            (ops, Operator),
            (keywords, Keyword),
            (r'\d+|\d*\.\d+', Number),                # integer or decimal
            (r'(true|false)\b', Name.Builtin),
            (r'[A-Z][a-zA-Z0-9]*\b', String.Symbol),  # capitalized names
            (r'output\b', Name.Variable.Magic),
            (r'(export|import)\b', Keyword.Namespace),
            (r'[a-z][a-zA-Z0-9]*\b', Name.Variable)
        ],
        'multiString': [
            (r'\\.', String.Escape),
            (r'"""', String, '#pop'),  # closing delimiter
            (r'"', String),            # lone quote inside the string
            (r'[^\\"]+', String),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            # A raw newline inside a single-line string is an error.
            (r'\n', Error),
            (r'[^\\"]+', String),
        ],
    }
diff --git a/pygments/lexers/pony.py b/pygments/lexers/pony.py
new file mode 100644
index 0000000..014f8f8
--- /dev/null
+++ b/pygments/lexers/pony.py
@@ -0,0 +1,93 @@
+"""
+ pygments.lexers.pony
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Pony and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['PonyLexer']
+
+
class PonyLexer(RegexLexer):
    """
    For Pony source code.

    .. versionadded:: 2.4
    """

    name = 'Pony'
    aliases = ['pony']
    filenames = ['*.pony']

    # Reference-capability annotations, reused by the 'typename' and
    # 'methodname' states below.
    _caps = r'(iso|trn|ref|val|box|tag)'

    tokens = {
        'root': [
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            (r'//.*\n', Comment.Single),
            # Block comments may nest, so delegate to a dedicated state.
            (r'/\*', Comment.Multiline, 'nested_comment'),
            (r'"""(?:.|\n)*?"""', String.Doc),
            (r'"', String, 'string'),
            (r'\'.*\'', String.Char),
            (r'=>|[]{}:().~;,|&!^?[]', Punctuation),
            (words((
                'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt',
                'not', 'or'),
                suffix=r'\b'),
             Operator.Word),
            (r'!=|==|<<|>>|[-+/*%=<>]', Operator),
            (words((
                'box', 'break', 'compile_error', 'compile_intrinsic',
                'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error',
                'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match',
                'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then',
                'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where',
                'while', 'with', '#any', '#read', '#send', '#share'),
                suffix=r'\b'),
             Keyword),
            # Type and method definitions hand off to sub-states so the
            # defined name gets tagged Name.Class / Name.Function.
            (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)',
             bygroups(Keyword, Text), 'typename'),
            (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'),
            (words((
                'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128',
                'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64',
                'Bool', 'Pointer', 'None', 'Any', 'Array', 'String',
                'Iterator'),
                suffix=r'\b'),
             Name.Builtin.Type),
            (r'_?[A-Z]\w*', Name.Type),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'(true|false)\b', Name.Builtin),
            (r'_\d*', Name),
            (r'_?[a-z][\w\']*', Name)
        ],
        'typename': [
            # Optional capability, optional whitespace, then the type name.
            (_caps + r'?((?:\s)*)(_?[A-Z]\w*)',
             bygroups(Keyword, Text, Name.Class), '#pop')
        ],
        'methodname': [
            # Optional capability, optional whitespace, then the method name.
            (_caps + r'?((?:\s)*)(_?[a-z]\w*)',
             bygroups(Keyword, Text, Name.Function), '#pop')
        ],
        'nested_comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),  # nested opener
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\"', String),
            (r'[^\\"]+', String)
        ]
    }
diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py
new file mode 100644
index 0000000..b39979d
--- /dev/null
+++ b/pygments/lexers/praat.py
@@ -0,0 +1,304 @@
+"""
+ pygments.lexers.praat
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Praat.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, bygroups, include
+from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, \
+ Number, Operator, Whitespace
+
+__all__ = ['PraatLexer']
+
+
class PraatLexer(RegexLexer):
    """
    For Praat scripts.

    .. versionadded:: 2.1
    """

    name = 'Praat'
    url = 'http://www.praat.org'
    aliases = ['praat']
    filenames = ['*.praat', '*.proc', '*.psc']

    # Control-flow and scripting keywords.
    keywords = (
        'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
        'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
        'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
        'editor', 'endeditor', 'clearinfo',
    )

    # Built-in functions that return strings (matched with a `$` suffix
    # in the 'function_call' state).
    functions_string = (
        'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
        'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
        'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
        'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
    )

    # Built-in functions that return numbers (no sigil suffix).
    functions_numeric = (
        'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
        'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
        'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
        'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
        'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
        'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
        'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
        'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
        'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
        'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
        'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
        'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
        'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
        'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
        'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
        'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
        'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
        'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
        'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
        'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
        'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
        'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc',
        'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
        'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine',
        'writeInfo', 'writeInfoLine',
    )

    # Built-in functions that return arrays (matched with a `#` suffix).
    functions_array = (
        'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
    )

    # Praat object types; used both as class names and to introduce
    # object references like `Sound_name`.
    objects = (
        'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
        'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
        'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
        'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
        'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
        'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
        'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
        'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
        'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
        'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
        'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
        'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
        'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
        'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
        'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
        'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
        'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti',
        'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo',
        'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
        'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
        'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
        'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
        'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
        'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
        'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
        'Weight', 'WordList',
    )

    # Predefined numeric variables (no sigil).
    variables_numeric = (
        'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
    )

    # Predefined string variables (matched with a `$` suffix).
    variables_string = (
        'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
        'preferencesDirectory', 'newline', 'temporaryDirectory',
        'defaultDirectory',
    )

    # Attributes that can be read off an object reference, e.g. `.ncol`.
    object_attributes = (
        'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy',
    )

    tokens = {
        'root': [
            (r'(\s+)(#.*?$)', bygroups(Whitespace, Comment.Single)),
            (r'^#.*?$', Comment.Single),
            (r';[^\n]*', Comment.Single),
            (r'\s+', Whitespace),

            (r'\bprocedure\b', Keyword, 'procedure_definition'),
            (r'\bcall\b', Keyword, 'procedure_call'),
            (r'@', Name.Function, 'procedure_call'),

            include('function_call'),

            (words(keywords, suffix=r'\b'), Keyword),

            # `form ... endform` blocks use the legacy argument syntax.
            (r'(\bform\b)(\s+)([^\n]+)',
             bygroups(Keyword, Whitespace, String), 'old_form'),

            # Commands whose single argument is an unquoted string.
            (r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
             r'include|execute|system(?:_nocheck)?)(\s+)',
             bygroups(Keyword, Whitespace), 'string_unquoted'),

            (r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Label)),

            include('variable_name'),
            include('number'),

            (r'"', String, 'string'),

            # An object type followed by a name, e.g. `Sound mysound`.
            (words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),

            # Any other capitalized word starts a menu-command invocation.
            (r'\b[A-Z]', Keyword, 'command'),
            (r'(\.{3}|[)(,])', Punctuation),
        ],
        'command': [
            (r'( ?[\w()-]+ ?)', Keyword),

            include('string_interpolated'),

            # `...` switches to old-style arguments, `:` to new-style.
            (r'\.{3}', Keyword, ('#pop', 'old_arguments')),
            (r':', Keyword, ('#pop', 'comma_list')),
            (r'\s', Whitespace, '#pop'),
        ],
        'procedure_call': [
            (r'\s+', Whitespace),
            (r'([\w.]+)(?:(:)|(?:(\s*)(\()))',
             bygroups(Name.Function, Punctuation,
                      Text.Whitespace, Punctuation), '#pop'),
            (r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
        ],
        'procedure_definition': [
            (r'\s', Whitespace),
            # NOTE(review): group 2 here captures `(` or `:` but is emitted
            # as Whitespace — possibly intended to be Punctuation; confirm.
            (r'([\w.]+)(\s*?[(:])',
             bygroups(Name.Function, Whitespace), '#pop'),
            (r'([\w.]+)([^\n]*)',
             bygroups(Name.Function, Text), '#pop'),
        ],
        'function_call': [
            # The sigil on the name encodes the return type:
            # `$` = string, `#` = array, none = numeric.
            (words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
            (words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'),
            (words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'),
        ],
        'function': [
            (r'\s+', Whitespace),
            (r':', Punctuation, ('#pop', 'comma_list')),
            (r'\s*\(', Punctuation, ('#pop', 'comma_list')),
        ],
        'comma_list': [
            # Line continuation: `...` at the start of the next line.
            (r'(\s*\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),

            (r'(\s*)(?:([)\]])|(\n))', bygroups(
                Whitespace, Punctuation, Whitespace), '#pop'),

            (r'\s+', Whitespace),
            (r'"', String, 'string'),
            (r'\b(if|then|else|fi|endif)\b', Keyword),

            include('function_call'),
            include('variable_name'),
            include('operator'),
            include('number'),

            (r'[()]', Text),
            (r',', Punctuation),
        ],
        'old_arguments': [
            (r'\n', Whitespace, '#pop'),

            include('variable_name'),
            include('operator'),
            include('number'),

            (r'"', String, 'string'),
            (r'[^\n]', Text),
        ],
        'number': [
            (r'\n', Whitespace, '#pop'),
            (r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
        ],
        'object_reference': [
            include('string_interpolated'),
            (r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin),

            (words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'),

            (r'\$', Name.Builtin),
            (r'\[', Text, '#pop'),
        ],
        'variable_name': [
            include('operator'),
            include('number'),

            (words(variables_string, suffix=r'\$'), Name.Variable.Global),
            (words(variables_numeric,
                   suffix=r'(?=[^a-zA-Z0-9_."\'$#\[:(]|\s|^|$)'),
             Name.Variable.Global),

            # Object references such as `Sound_mysound`.
            (words(objects, prefix=r'\b', suffix=r"(_)"),
             bygroups(Name.Builtin, Name.Builtin),
             'object_reference'),

            (r'\.?_?[a-z][\w.]*(\$|#)?', Text),
            (r'[\[\]]', Punctuation, 'comma_list'),

            include('string_interpolated'),
        ],
        'operator': [
            (r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator),
            (r'(?<![\w.])(and|or|not|div|mod)(?![\w.])', Operator.Word),
        ],
        'string_interpolated': [
            # 'var', optionally with an index and a precision, e.g. 'x[1]:3'.
            (r'\'[_a-z][^\[\]\'":]*(\[([\d,]+|"[\w,]+")\])?(:[0-9]+)?\'',
             String.Interpol),
        ],
        'string_unquoted': [
            (r'(\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),

            (r'\n', Whitespace, '#pop'),
            (r'\s', Whitespace),

            include('string_interpolated'),

            (r"'", String),
            (r"[^'\n]+", String),
        ],
        'string': [
            (r'(\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),

            (r'"', String, '#pop'),

            include('string_interpolated'),

            (r"'", String),
            (r'[^\'"\n]+', String),
        ],
        'old_form': [
            (r'(\s+)(#.*?$)', bygroups(Whitespace, Comment.Single)),
            (r'\s+', Whitespace),

            (r'(optionmenu|choice)([ \t]+)(\S+)(:)([ \t]+)',
             bygroups(Keyword, Whitespace, Text, Punctuation, Whitespace), 'number'),

            (r'(option|button)([ \t]+)',
             bygroups(Keyword, Whitespace), 'string_unquoted'),

            (r'(sentence|text)([ \t]+)(\S+)',
             bygroups(Keyword, Whitespace, String), 'string_unquoted'),

            (r'(word)([ \t]+)(\S+)([ \t]*)(\S+)?(?:([ \t]+)(.*))?',
             bygroups(Keyword, Whitespace, Text, Whitespace, Text, Whitespace, Text)),

            (r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
             bygroups(Keyword, Whitespace, Name.Variable)),

            # Ideally processing of the number would happen in the 'number'
            # state, but that doesn't seem to work.
            (r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
             r'(?:[eE][-+]?\d+)?%?)',
             bygroups(Keyword, Whitespace, Operator, Number)),

            (r'(comment)(\s+)',
             bygroups(Keyword, Whitespace), 'string_unquoted'),

            (r'\bendform\b', Keyword, '#pop'),
        ]
    }
diff --git a/pygments/lexers/procfile.py b/pygments/lexers/procfile.py
new file mode 100644
index 0000000..72395ce
--- /dev/null
+++ b/pygments/lexers/procfile.py
@@ -0,0 +1,42 @@
+"""
+ pygments.lexers.procfile
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Procfile file format.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Name, Number, String, Text, Punctuation
+
+__all__ = ["ProcfileLexer"]
+
+
class ProcfileLexer(RegexLexer):
    """
    Lexer for Procfile file format.

    The format is used to run processes on Heroku or is used by Foreman or
    Honcho tools.

    .. versionadded:: 2.10
    """
    name = 'Procfile'
    url = 'https://devcenter.heroku.com/articles/procfile#procfile-format'
    aliases = ['procfile']
    filenames = ['Procfile']

    tokens = {
        'root': [
            # Process type at the start of a line, e.g. `web:`.
            # NOTE(review): only lowercase letters are accepted for the
            # label; confirm whether digits/underscores should be allowed.
            (r'^([a-z]+)(:)', bygroups(Name.Label, Punctuation)),
            (r'\s+', Text.Whitespace),
            (r'"[^"]*"', String),
            (r"'[^']*'", String),
            (r'[0-9]+', Number.Integer),
            # Shell-style variable reference, e.g. `$PORT`.
            (r'\$[a-zA-Z_][\w]*', Name.Variable),
            # Inline environment assignment, e.g. `RACK_ENV=production`.
            (r'(\w+)(=)(\w+)', bygroups(Name.Variable, Punctuation, String)),
            # Anything else: command words, paths, flags.
            (r'([\w\-\./]+)', Text),
        ],
    }
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
new file mode 100644
index 0000000..2cdceed
--- /dev/null
+++ b/pygments/lexers/prolog.py
@@ -0,0 +1,304 @@
+"""
+ pygments.lexers.prolog
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Prolog and Prolog-like languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['PrologLexer', 'LogtalkLexer']
+
+
class PrologLexer(RegexLexer):
    """
    Lexer for Prolog files.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'nested-comment'),
            (r'%.*', Comment.Single),
            # character literal, e.g. 0'a
            (r'0\'.', String.Char),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            # literal with prepended base, e.g. 2'101
            (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer),
            (r'[\[\](){}|.,;!]', Punctuation),
            (r':-|-->', Punctuation),
            # double-quoted string with the usual escape sequences
            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
             r'\\[0-7]+\\|\\["\\abcefnrstv]|[^\\"])*"', String.Double),
            (r"'(?:''|[^'])*'", String.Atom),  # quoted atom
            # Needs to not be followed by an atom.
            # (r'=(?=\s|[a-zA-Z\[])', Operator),
            (r'is\b', Operator),
            (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
             Operator),
            (r'(mod|div|not)\b', Operator),
            (r'_', Keyword),  # The don't-care variable
            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
            # The Unicode ranges below allow atoms and functor names
            # written in non-Latin scripts.
            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             r'(\s*)(:-|-->)',
             bygroups(Name.Function, Text, Operator)),  # function defn
            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             r'(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
             String.Atom),  # atom, characters
            # This one includes !
            (r'[#&*+\-./:<=>?@\\^~\u00a1-\u00bf\u2010-\u303f]+',
             String.Atom),  # atom, graphics
            (r'[A-Z_]\w*', Name.Variable),
            (r'\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
        ],
        'nested-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            (r'/\*', Comment.Multiline, '#push'),  # comments may nest
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline),
        ],
    }

    def analyse_text(text):
        # The clause-neck operator is a cheap, strong hint of Prolog.
        return ':-' in text
+
+
class LogtalkLexer(RegexLexer):
    """
    For Logtalk source code.

    .. versionadded:: 0.10
    """

    name = 'Logtalk'
    url = 'http://logtalk.org/'
    aliases = ['logtalk']
    filenames = ['*.lgt', '*.logtalk']
    mimetypes = ['text/x-logtalk']

    tokens = {
        'root': [
            # Directives
            (r'^\s*:-\s', Punctuation, 'directive'),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
            # Whitespace
            (r'\n', Text),
            (r'\s+', Text),
            # Numbers
            (r"0'[\\]?.", Number),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Event handlers
            (r'(after|before)(?=[(])', Keyword),
            # Message forwarding handler
            (r'forward(?=[(])', Keyword),
            # Execution-context methods
            (r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword),
            # Reflection
            (r'(current_predicate|predicate_property)(?=[(])', Keyword),
            # DCGs and term expansion
            (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
            # Entity
            (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
            (r'(object|protocol|category)_property(?=[(])', Keyword),
            # Entity relations
            (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
            (r'extends_(object|protocol|category)(?=[(])', Keyword),
            (r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
            (r'(instantiat|specializ)es_class(?=[(])', Keyword),
            # Events
            (r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
            # Flags
            (r'(create|current|set)_logtalk_flag(?=[(])', Keyword),
            # Compiling, loading, and library paths
            (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword),
            (r'\blogtalk_make\b', Keyword),
            # Database
            (r'(clause|retract(all)?)(?=[(])', Keyword),
            (r'a(bolish|ssert(a|z))(?=[(])', Keyword),
            # Control constructs
            (r'(ca(ll|tch)|throw)(?=[(])', Keyword),
            (r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword),
            (r'(type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword),
            # All solutions
            (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
            # Multi-threading predicates
            (r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
            # Engine predicates
            (r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword),
            # Term unification
            (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
            # Term creation and decomposition
            (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
            # Evaluable functors
            (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
            (r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
            (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
            # Other arithmetic functors
            (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
            # Term testing
            (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword),
            # Term comparison
            (r'compare(?=[(])', Keyword),
            # Stream selection and control
            (r'(curren|se)t_(in|out)put(?=[(])', Keyword),
            (r'(open|close)(?=[(])', Keyword),
            (r'flush_output(?=[(])', Keyword),
            (r'(at_end_of_stream|flush_output)\b', Keyword),
            (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
            # Character and byte input/output
            (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
            (r'\bnl\b', Keyword),
            # Term input/output
            (r'read(_term)?(?=[(])', Keyword),
            (r'write(q|_(canonical|term))?(?=[(])', Keyword),
            (r'(current_)?op(?=[(])', Keyword),
            (r'(current_)?char_conversion(?=[(])', Keyword),
            # Atomic term processing
            (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
            (r'(char_code|sub_atom)(?=[(])', Keyword),
            (r'number_c(har|ode)s(?=[(])', Keyword),
            # Implementation defined hooks functions
            (r'(se|curren)t_prolog_flag(?=[(])', Keyword),
            (r'\bhalt\b', Keyword),
            (r'halt(?=[(])', Keyword),
            # Message sending operators
            (r'(::|:|\^\^)', Operator),
            # External call
            (r'[{}]', Keyword),
            # Logic and control
            (r'(ignore|once)(?=[(])', Keyword),
            (r'\brepeat\b', Keyword),
            # Sorting
            (r'(key)?sort(?=[(])', Keyword),
            # Bitwise functors
            (r'(>>|<<|/\\|\\\\|\\)', Operator),
            # Predicate aliases
            (r'\bas\b', Operator),
            # Arithmetic evaluation
            (r'\bis\b', Keyword),
            # Arithmetic comparison
            (r'(=:=|=\\=|<|=<|>=|>)', Operator),
            # Term creation and decomposition
            (r'=\.\.', Operator),
            # Term unification
            (r'(=|\\=)', Operator),
            # Term comparison
            (r'(==|\\==|@=<|@<|@>=|@>)', Operator),
            # Evaluable functors
            (r'(//|[-+*/])', Operator),
            (r'\b(e|pi|div|mod|rem)\b', Operator),
            # Other arithmetic functors
            (r'\b\*\*\b', Operator),
            # DCG rules
            (r'-->', Operator),
            # Control constructs
            (r'([!;]|->)', Operator),
            # Logic and control
            (r'\\+', Operator),
            # Mode operators
            (r'[?@]', Operator),
            # Existential quantifier
            (r'\^', Operator),
            # Strings
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
        ],

        'quoted_atom': [
            (r"''", String),          # escaped quote inside the atom
            (r"'", String, '#pop'),   # closing quote
            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
            (r"[^\\'\n]+", String),
            (r'\\', String),
        ],

        # NOTE(review): the rules below transition to a fresh 'root' state
        # instead of popping 'directive', so the state stack grows with
        # each directive — confirm this mirrors upstream intent.
        'directive': [
            # Conditional compilation directives
            (r'(el)?if(?=[(])', Keyword, 'root'),
            (r'(e(lse|ndif))(?=[.])', Keyword, 'root'),
            # Entity directives
            (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
            (r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'),
            # Predicate scope directives
            (r'(public|protected|private)(?=[(])', Keyword, 'root'),
            # Other directives
            (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
            (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
            (r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'),
            (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
            (r'op(?=[(])', Keyword, 'root'),
            (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
            (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
            (r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'),
        ],

        # Inside an entity-opening directive, until the closing `).`.
        'entityrelations': [
            (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
            # Numbers
            (r"0'[\\]?.", Number),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
            # Strings
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # End of entity-opening directive
            (r'([)]\.)', Text, 'root'),
            # Scope operator
            (r'(::)', Operator),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
            # Whitespace
            (r'\n', Text),
            (r'\s+', Text),
        ]
    }

    def analyse_text(text):
        # Entity-opening directives are unique to Logtalk and conclusive;
        # a generic leading `:- directive` is merely very likely Logtalk.
        if ':- object(' in text:
            return 1.0
        elif ':- protocol(' in text:
            return 1.0
        elif ':- category(' in text:
            return 1.0
        elif re.search(r'^:-\s[a-z]', text, re.M):
            return 0.9
        else:
            return 0.0
diff --git a/pygments/lexers/promql.py b/pygments/lexers/promql.py
new file mode 100644
index 0000000..dcb8b33
--- /dev/null
+++ b/pygments/lexers/promql.py
@@ -0,0 +1,175 @@
+"""
+ pygments.lexers.promql
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Prometheus Query Language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+
+__all__ = ["PromQLLexer"]
+
+
class PromQLLexer(RegexLexer):
    """
    For PromQL queries.

    For details about the grammar see:
    https://github.com/prometheus/prometheus/tree/master/promql/parser

    .. versionadded:: 2.7
    """

    name = "PromQL"
    url = 'https://prometheus.io/docs/prometheus/latest/querying/basics/'
    aliases = ["promql"]
    filenames = ["*.promql"]

    # Binary-operator modifiers and vector-matching keywords.
    base_keywords = (
        words(
            (
                "bool",
                "by",
                "group_left",
                "group_right",
                "ignoring",
                "offset",
                "on",
                "without",
            ),
            suffix=r"\b",
        ),
        Keyword,
    )

    # Aggregation operators.
    aggregator_keywords = (
        words(
            (
                "sum",
                "min",
                "max",
                "avg",
                "group",
                "stddev",
                "stdvar",
                "count",
                "count_values",
                "bottomk",
                "topk",
                "quantile",
            ),
            suffix=r"\b",
        ),
        Keyword,
    )

    # Built-in functions.
    function_keywords = (
        words(
            (
                "abs",
                "absent",
                "absent_over_time",
                "avg_over_time",
                "ceil",
                "changes",
                "clamp_max",
                "clamp_min",
                "count_over_time",
                "day_of_month",
                "day_of_week",
                "days_in_month",
                "delta",
                "deriv",
                "exp",
                "floor",
                "histogram_quantile",
                "holt_winters",
                "hour",
                "idelta",
                "increase",
                "irate",
                "label_join",
                "label_replace",
                "ln",
                "log10",
                "log2",
                "max_over_time",
                "min_over_time",
                "minute",
                "month",
                "predict_linear",
                "quantile_over_time",
                "rate",
                "resets",
                "round",
                "scalar",
                "sort",
                "sort_desc",
                "sqrt",
                "stddev_over_time",
                "stdvar_over_time",
                "sum_over_time",
                "time",
                "timestamp",
                "vector",
                "year",
            ),
            suffix=r"\b",
        ),
        Keyword.Reserved,
    )

    tokens = {
        "root": [
            (r"\n", Whitespace),
            (r"\s+", Whitespace),
            (r",", Punctuation),
            # Keywords
            base_keywords,
            aggregator_keywords,
            function_keywords,
            # Offsets, e.g. `5m` in `offset 5m`
            (r"[1-9][0-9]*[smhdwy]", String),
            # Numbers
            (r"-?[0-9]+\.[0-9]+", Number.Float),
            (r"-?[0-9]+", Number.Integer),
            # Comments
            (r"#.*?$", Comment.Single),
            # Operators
            (r"(\+|\-|\*|\/|\%|\^)", Operator),
            (r"==|!=|>=|<=|<|>", Operator),
            (r"and|or|unless", Operator.Word),
            # Metrics
            # NOTE(review): this requires at least two characters, so a
            # single-letter metric name is not matched — confirm intended.
            (r"[_a-zA-Z][a-zA-Z0-9_]+", Name.Variable),
            # Params
            (r'(["\'])(.*?)(["\'])', bygroups(Punctuation, String, Punctuation)),
            # Other states
            (r"\(", Operator, "function"),
            (r"\)", Operator),
            (r"\{", Punctuation, "labels"),
            (r"\[", Punctuation, "range"),
        ],
        # Label matchers inside `{...}`, e.g. `job=~"api.*"`.
        "labels": [
            (r"\}", Punctuation, "#pop"),
            (r"\n", Whitespace),
            (r"\s+", Whitespace),
            (r",", Punctuation),
            (r'([_a-zA-Z][a-zA-Z0-9_]*?)(\s*?)(=~|!=|=|!~)(\s*?)("|\')(.*?)("|\')',
             bygroups(Name.Label, Whitespace, Operator, Whitespace,
                      Punctuation, String, Punctuation)),
        ],
        # Range selector inside `[...]`, e.g. `[5m]`.
        "range": [
            (r"\]", Punctuation, "#pop"),
            (r"[1-9][0-9]*[smhdwy]", String),
        ],
        # Balances nested parentheses; anything else falls back to 'root'.
        "function": [
            (r"\)", Operator, "#pop"),
            (r"\(", Operator, "#push"),
            default("#pop"),
        ],
    }
diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py
new file mode 100644
index 0000000..0a318a9
--- /dev/null
+++ b/pygments/lexers/python.py
@@ -0,0 +1,1204 @@
+"""
+ pygments.lexers.python
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Python and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import keyword
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
+ default, words, combined, do_insertions, this, line_re
+from pygments.util import get_bool_opt, shebang_matches
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Other, Error, Whitespace
+from pygments import unistring as uni
+
+__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
+ 'Python2Lexer', 'Python2TracebackLexer',
+ 'CythonLexer', 'DgLexer', 'NumPyLexer']
+
+
class PythonLexer(RegexLexer):
    """
    For Python source code (version 3.x).

    .. versionadded:: 0.10

    .. versionchanged:: 2.5
       This is now the default ``PythonLexer``. It is still available as the
       alias ``Python3Lexer``.
    """

    name = 'Python'
    url = 'http://www.python.org'
    aliases = ['python', 'py', 'sage', 'python3', 'py3']
    filenames = [
        '*.py',
        '*.pyw',
        # Type stubs
        '*.pyi',
        # Jython
        '*.jy',
        # Sage
        '*.sage',
        # SCons
        '*.sc',
        'SConstruct',
        'SConscript',
        # Skylark/Starlark (used by Bazel, Buck, and Pants)
        '*.bzl',
        'BUCK',
        'BUILD',
        'BUILD.bazel',
        'WORKSPACE',
        # Twisted Application infrastructure
        '*.tac',
    ]
    mimetypes = ['text/x-python', 'application/x-python',
                 'text/x-python3', 'application/x-python3']

    # Unicode-aware identifier pattern: one XID_Start character followed by
    # any number of XID_Continue characters (built from pygments.unistring).
    uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)

    # NOTE: the two helpers below take no `self` on purpose -- they are called
    # at class-definition time to build the 'strings-*'/'fstrings-*' state
    # lists in `tokens`, and are never used as instance methods.
    def innerstring_rules(ttype):
        """Return the shared rules for the interior of a plain (non-f) string
        literal whose body token type is *ttype*."""
        return [
            # the old style '%s' % (...) string formatting (still valid in Py3)
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsaux%]', String.Interpol),
            # the new style '{}'.format(...) string formatting
            (r'\{'
             r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?'  # field name
             r'(\![sra])?'                       # conversion
             r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
             r'\}', String.Interpol),

            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%{\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r'%|(\{{1,2})', ttype)
            # newlines are an error (use "nl" state)
        ]

    def fstring_rules(ttype):
        """Return the shared rules for the interior of an f-string literal
        whose body token type is *ttype*."""
        return [
            # Assuming that a '}' is the closing brace after format specifier.
            # Sadly, this means that we won't detect syntax error. But it's
            # more important to parse correct syntax correctly, than to
            # highlight invalid syntax.
            (r'\}', String.Interpol),
            (r'\{', String.Interpol, 'expr-inside-fstring'),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"{}\n]+', ttype),
            (r'[\'"\\]', ttype),
            # newlines are an error (use "nl" state)
        ]

    # Within each state, earlier rules win -- e.g. the docstring rules below
    # must precede the generic string rules in 'expr'.
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r'\A#!.+$', Comment.Hashbang),
            (r'#.*$', Comment.Single),
            (r'\\\n', Text),
            (r'\\', Text),
            include('keywords'),
            include('soft-keywords'),
            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'fromimport'),
            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'import'),
            include('expr'),
        ],
        'expr': [
            # raw f-strings
            ('(?i)(rf|fr)(""")',
             bygroups(String.Affix, String.Double),
             combined('rfstringescape', 'tdqf')),
            ("(?i)(rf|fr)(''')",
             bygroups(String.Affix, String.Single),
             combined('rfstringescape', 'tsqf')),
            ('(?i)(rf|fr)(")',
             bygroups(String.Affix, String.Double),
             combined('rfstringescape', 'dqf')),
            ("(?i)(rf|fr)(')",
             bygroups(String.Affix, String.Single),
             combined('rfstringescape', 'sqf')),
            # non-raw f-strings
            ('([fF])(""")', bygroups(String.Affix, String.Double),
             combined('fstringescape', 'tdqf')),
            ("([fF])(''')", bygroups(String.Affix, String.Single),
             combined('fstringescape', 'tsqf')),
            ('([fF])(")', bygroups(String.Affix, String.Double),
             combined('fstringescape', 'dqf')),
            ("([fF])(')", bygroups(String.Affix, String.Single),
             combined('fstringescape', 'sqf')),
            # raw bytes and strings
            ('(?i)(rb|br|r)(""")',
             bygroups(String.Affix, String.Double), 'tdqs'),
            ("(?i)(rb|br|r)(''')",
             bygroups(String.Affix, String.Single), 'tsqs'),
            ('(?i)(rb|br|r)(")',
             bygroups(String.Affix, String.Double), 'dqs'),
            ("(?i)(rb|br|r)(')",
             bygroups(String.Affix, String.Single), 'sqs'),
            # non-raw strings
            ('([uU]?)(""")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'tdqs')),
            ("([uU]?)(''')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'tsqs')),
            ('([uU]?)(")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'dqs')),
            ("([uU]?)(')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'sqs')),
            # non-raw bytes
            ('([bB])(""")', bygroups(String.Affix, String.Double),
             combined('bytesescape', 'tdqs')),
            ("([bB])(''')", bygroups(String.Affix, String.Single),
             combined('bytesescape', 'tsqs')),
            ('([bB])(")', bygroups(String.Affix, String.Double),
             combined('bytesescape', 'dqs')),
            ("([bB])(')", bygroups(String.Affix, String.Single),
             combined('bytesescape', 'sqs')),

            (r'[^\S\n]+', Text),
            include('numbers'),
            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
            (r'[]{}:(),;[]', Punctuation),
            (r'(in|is|and|or|not)\b', Operator.Word),
            include('expr-keywords'),
            include('builtins'),
            include('magicfuncs'),
            include('magicvars'),
            include('name'),
        ],
        'expr-inside-fstring': [
            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
            # without format specifier
            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
             r'(\![sraf])?'     # conversion
             r'\}', String.Interpol, '#pop'),
            # with format specifier
            # we'll catch the remaining '}' in the outer scope
            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
             r'(\![sraf])?'     # conversion
             r':', String.Interpol, '#pop'),
            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-inside-fstring-inner': [
            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
            (r'[])}]', Punctuation, '#pop'),
            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-keywords': [
            # Based on https://docs.python.org/3/reference/expressions.html
            (words((
                'async for', 'await', 'else', 'for', 'if', 'lambda',
                'yield', 'yield from'), suffix=r'\b'),
             Keyword),
            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
        ],
        'keywords': [
            (words((
                'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
                'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
                'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
                'yield from', 'as', 'with'), suffix=r'\b'),
             Keyword),
            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
        ],
        'soft-keywords': [
            # `match`, `case` and `_` soft keywords
            (r'(^[ \t]*)'              # at beginning of line + possible indentation
             r'(match|case)\b'         # a possible keyword
             r'(?![ \t]*(?:'           # not followed by...
             r'[:,;=^&|@~)\]}]|(?:' +  # characters and keywords that mean this isn't
             r'|'.join(keyword.kwlist) + r')\b))',  # pattern matching
             bygroups(Text, Keyword), 'soft-keywords-inner'),
        ],
        'soft-keywords-inner': [
            # optional `_` keyword
            (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
            default('#pop')
        ],
        'builtins': [
            # (?<!\.) prevents matching attribute accesses such as `obj.print`.
            (words((
                '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray',
                'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex',
                'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter',
                'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr',
                'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass',
                'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview',
                'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print',
                'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
                'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
                'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
                'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
                'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
                'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
                'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
                'NotImplementedError', 'OSError', 'OverflowError',
                'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
                'RuntimeError', 'RuntimeWarning', 'StopIteration',
                'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
                'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
                'Warning', 'WindowsError', 'ZeroDivisionError',
                # new builtin exceptions from PEP 3151
                'BlockingIOError', 'ChildProcessError', 'ConnectionError',
                'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
                'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
                'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
                'PermissionError', 'ProcessLookupError', 'TimeoutError',
                # others new in Python 3
                'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
                'EncodingWarning'),
                prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'magicfuncs': [
            (words((
                '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
                '__and__', '__anext__', '__await__', '__bool__', '__bytes__',
                '__call__', '__complex__', '__contains__', '__del__', '__delattr__',
                '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
                '__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
                '__ge__', '__get__', '__getattr__', '__getattribute__',
                '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
                '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
                '__imul__', '__index__', '__init__', '__instancecheck__',
                '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
                '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
                '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
                '__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
                '__new__', '__next__', '__or__', '__pos__', '__pow__',
                '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
                '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
                '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
                '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
                '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
                '__sub__', '__subclasscheck__', '__truediv__',
                '__xor__'), suffix=r'\b'),
             Name.Function.Magic),
        ],
        'magicvars': [
            (words((
                '__annotations__', '__bases__', '__class__', '__closure__',
                '__code__', '__defaults__', '__dict__', '__doc__', '__file__',
                '__func__', '__globals__', '__kwdefaults__', '__module__',
                '__mro__', '__name__', '__objclass__', '__qualname__',
                '__self__', '__slots__', '__weakref__'), suffix=r'\b'),
             Name.Variable.Magic),
        ],
        'numbers': [
            # PEP 515 underscore separators are accepted via the (?:_?\d) groups.
            # Float/exponent rules must precede the plain-integer fallback.
            (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
             r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
            (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
            (r'0[oO](?:_?[0-7])+', Number.Oct),
            (r'0[bB](?:_?[01])+', Number.Bin),
            (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
            (r'\d(?:_?\d)*', Number.Integer),
        ],
        'name': [
            (r'@' + uni_name, Name.Decorator),
            (r'@', Operator),  # new matrix multiplication operator
            (uni_name, Name),
        ],
        'funcname': [
            include('magicfuncs'),
            (uni_name, Name.Function, '#pop'),
            default('#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'\.', Name.Namespace),
            (uni_name, Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
            (r'\.', Name.Namespace),
            # if None occurs here, it's "raise x from None", since None can
            # never be a module name
            (r'None\b', Name.Builtin.Pseudo, '#pop'),
            (uni_name, Name.Namespace),
            default('#pop'),
        ],
        'rfstringescape': [
            # In raw f-strings only the brace-doubling escapes apply.
            (r'\{\{', String.Escape),
            (r'\}\}', String.Escape),
        ],
        'fstringescape': [
            include('rfstringescape'),
            include('stringescape'),
        ],
        'bytesescape': [
            (r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'stringescape': [
            # str adds the Unicode-only escapes on top of the bytes escapes.
            (r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
            include('bytesescape')
        ],
        'fstrings-single': fstring_rules(String.Single),
        'fstrings-double': fstring_rules(String.Double),
        'strings-single': innerstring_rules(String.Single),
        'strings-double': innerstring_rules(String.Double),
        'dqf': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('fstrings-double')
        ],
        'sqf': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('fstrings-single')
        ],
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('strings-double')
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('strings-single')
        ],
        'tdqf': [
            (r'"""', String.Double, '#pop'),
            include('fstrings-double'),
            (r'\n', String.Double)
        ],
        'tsqf': [
            (r"'''", String.Single, '#pop'),
            include('fstrings-single'),
            (r'\n', String.Single)
        ],
        'tdqs': [
            (r'"""', String.Double, '#pop'),
            include('strings-double'),
            (r'\n', String.Double)
        ],
        'tsqs': [
            (r"'''", String.Single, '#pop'),
            include('strings-single'),
            (r'\n', String.Single)
        ],
    }

    # Called statically by the lexer-guessing machinery (no `self`): claim the
    # text on a python shebang or an early `import ` statement.
    def analyse_text(text):
        return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
            'import ' in text[:1000]
+
+
Python3Lexer = PythonLexer  # backwards-compatible alias (pre-2.5 name)
+
+
class Python2Lexer(RegexLexer):
    """
    For Python 2.x source code.

    .. versionchanged:: 2.5
       This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
       refers to the Python 3 variant. File name patterns like ``*.py`` have
       been moved to Python 3 as well.
    """

    name = 'Python 2.x'
    url = 'http://www.python.org'
    aliases = ['python2', 'py2']
    filenames = []  # now taken over by PythonLexer (3.x)
    mimetypes = ['text/x-python2', 'application/x-python2']

    # Class-definition-time helper (no `self`); used to build the
    # 'strings-single'/'strings-double' states below.
    def innerstring_rules(ttype):
        """Return the shared rules for string interiors of type *ttype*."""
        return [
            # the old style '%s' % (...) string formatting
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r'%', ttype),
            # newlines are an error (use "nl" state)
        ]

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'\A#!.+$', Comment.Hashbang),
            (r'#.*$', Comment.Single),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
            include('keywords'),
            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'fromimport'),
            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'import'),
            include('builtins'),
            include('magicfuncs'),
            include('magicvars'),
            include('backtick'),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
             bygroups(String.Affix, String.Double), 'tdqs'),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
             bygroups(String.Affix, String.Single), 'tsqs'),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
             bygroups(String.Affix, String.Double), 'dqs'),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
             bygroups(String.Affix, String.Single), 'sqs'),
            ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'tdqs')),
            ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'tsqs')),
            ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'dqs')),
            ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            # NOTE(review): 'yield from' is Python 3 syntax and, listed after
            # 'yield', is presumably never reached in the alternation; kept
            # as-is since it cannot cause a mis-highlight.
            (words((
                'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
                'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
                'print', 'raise', 'return', 'try', 'while', 'yield',
                'yield from', 'as', 'with'), suffix=r'\b'),
             Keyword),
        ],
        'builtins': [
            (words((
                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
                'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
                'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
                'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
                'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
                'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
                'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
                'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
                'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
                'unichr', 'unicode', 'vars', 'xrange', 'zip'),
                prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
             r')\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
                'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
                'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
                'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
                'MemoryError', 'NameError',
                'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
                'PendingDeprecationWarning', 'ReferenceError',
                'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
                'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
                'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
                'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'magicfuncs': [
            (words((
                '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
                '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
                '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
                '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
                '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
                '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
                '__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
                '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
                '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
                '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
                '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
                '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
                '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
                '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
                '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
                '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
                '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
                '__unicode__', '__xor__'), suffix=r'\b'),
             Name.Function.Magic),
        ],
        'magicvars': [
            (words((
                '__bases__', '__class__', '__closure__', '__code__', '__defaults__',
                '__dict__', '__doc__', '__file__', '__func__', '__globals__',
                '__metaclass__', '__module__', '__mro__', '__name__', '__self__',
                '__slots__', '__weakref__'),
                suffix=r'\b'),
             Name.Variable.Magic),
        ],
        'numbers': [
            # Float forms before the integer fallback; Py2-only `L` long suffix.
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[bB][01]+', Number.Bin),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],
        'backtick': [
            # Py2-only repr backticks: `x` == repr(x)
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@[\w.]+', Name.Decorator),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'funcname': [
            include('magicfuncs'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            default('#pop'),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'(?:[ \t]|\\\n)+', Text),
            (r'as\b', Keyword.Namespace),
            (r',', Operator),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(?:[ \t]|\\\n)+', Text),
            (r'import\b', Keyword.Namespace, '#pop'),
            # if None occurs here, it's "raise x from None", since None can
            # never be a module name
            (r'None\b', Name.Builtin.Pseudo, '#pop'),
            # sadly, in "raise x from y" y will be highlighted as namespace too
            (r'[a-zA-Z_.][\w.]*', Name.Namespace),
            # anything else here also means "raise x from y" and is therefore
            # not an error
            default('#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings-single': innerstring_rules(String.Single),
        'strings-double': innerstring_rules(String.Double),
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('strings-double')
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('strings-single')
        ],
        'tdqs': [
            (r'"""', String.Double, '#pop'),
            include('strings-double'),
            (r'\n', String.Double)
        ],
        'tsqs': [
            (r"'''", String.Single, '#pop'),
            include('strings-single'),
            (r'\n', String.Single)
        ],
    }

    # Only claims the text on an explicit python2 shebang; the plain-python
    # shebang is claimed by PythonLexer.analyse_text above.
    def analyse_text(text):
        return shebang_matches(text, r'pythonw?2(\.\d)?')
+
+
class PythonConsoleLexer(Lexer):
    """
    For Python console output or doctests, such as:

    .. sourcecode:: pycon

        >>> a = 'foo'
        >>> print a
        foo
        >>> 1 / 0
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ZeroDivisionError: integer division or modulo by zero

    Additional options:

    `python3`
        Use Python 3 lexer for code. Default is ``True``.

    .. versionadded:: 1.0
    .. versionchanged:: 2.5
       Now defaults to ``True``.
    """
    name = 'Python console session'
    aliases = ['pycon']
    mimetypes = ['text/x-python-doctest']

    def __init__(self, **options):
        # Select the Py3 or Py2 sub-lexers; defaults to Python 3.
        self.python3 = get_bool_opt(options, 'python3', True)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Split the session into prompts, code, output and tracebacks,
        delegating code to a PythonLexer and tracebacks to a traceback lexer.
        """
        if self.python3:
            pylexer = PythonLexer(**self.options)
            tblexer = PythonTracebackLexer(**self.options)
        else:
            pylexer = Python2Lexer(**self.options)
            tblexer = Python2TracebackLexer(**self.options)

        # Accumulators: `curcode` collects prompt-line code (with prompt
        # tokens queued in `insertions`); `curtb`/`tbindex` collect a pending
        # traceback and its start offset; `tb` flags "inside a traceback".
        curcode = ''
        insertions = []
        curtb = ''
        tbindex = 0
        tb = 0
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>>> ') or line.startswith('... '):
                tb = 0
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:4])]))
                curcode += line[4:]
            elif line.rstrip() == '...' and not tb:
                # only a new >>> prompt can end an exception block
                # otherwise an ellipsis in place of the traceback frames
                # will be mishandled
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, '...')]))
                curcode += line[3:]
            else:
                # Non-prompt line: flush any buffered code first so prompt
                # tokens interleave correctly with the code tokens.
                if curcode:
                    yield from do_insertions(
                        insertions, pylexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                if (line.startswith('Traceback (most recent call last):') or
                        re.match(' File "[^"]+", line \\d+\\n$', line)):
                    tb = 1
                    curtb = line
                    tbindex = match.start()
                elif line == 'KeyboardInterrupt\n':
                    yield match.start(), Name.Class, line
                elif tb:
                    curtb += line
                    if not (line.startswith(' ') or line.strip() == '...'):
                        # An unindented line ends the traceback; emit it
                        # through the traceback lexer, offset by tbindex.
                        tb = 0
                        for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                            yield tbindex+i, t, v
                        curtb = ''
                else:
                    yield match.start(), Generic.Output, line
        # Flush whatever is still buffered at end of input.
        if curcode:
            yield from do_insertions(insertions,
                                     pylexer.get_tokens_unprocessed(curcode))
        if curtb:
            for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                yield tbindex+i, t, v
+
+
class PythonTracebackLexer(RegexLexer):
    """
    For Python 3.x tracebacks, with support for chained exceptions.

    .. versionadded:: 1.0

    .. versionchanged:: 2.5
       This is now the default ``PythonTracebackLexer``. It is still available
       as the alias ``Python3TracebackLexer``.
    """

    name = 'Python Traceback'
    aliases = ['pytb', 'py3tb']
    filenames = ['*.pytb', '*.py3tb']
    mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
            # Headers emitted between chained tracebacks (implicit/explicit
            # exception chaining).
            (r'^During handling of the above exception, another '
             r'exception occurred:\n\n', Generic.Traceback),
            (r'^The above exception was the direct cause of the '
             r'following exception:\n\n', Generic.Traceback),
            # SyntaxError tracebacks start directly at a File line
            # (lookahead only, so 'intb' re-matches the full line).
            (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            (r'^( File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            # Source line of a frame, highlighted with the Python lexer.
            (r'^(    )(.+)(\n)',
             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
            # "ExcType: message" line ends the traceback.
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            (r'^([a-zA-Z_][\w.]*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop')
        ],
        'markers': [
            # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
            # error locations in Python 3.11+, or single-caret markers
            # for syntax errors before that.
            (r'^( {4,})([~^]+)(\n)',
             bygroups(Whitespace, Punctuation.Marker, Whitespace),
             '#pop'),
            default('#pop'),
        ],
    }
+
+
Python3TracebackLexer = PythonTracebackLexer  # backwards-compatible alias (pre-2.5 name)
+
+
class Python2TracebackLexer(RegexLexer):
    """
    For Python tracebacks.

    .. versionadded:: 0.7

    .. versionchanged:: 2.5
       This class has been renamed from ``PythonTracebackLexer``.
       ``PythonTracebackLexer`` now refers to the Python 3 variant.
    """

    name = 'Python 2.x Traceback'
    aliases = ['py2tb']
    filenames = ['*.py2tb']
    mimetypes = ['text/x-python2-traceback']

    tokens = {
        'root': [
            # Cover both (most recent call last) and (innermost last)
            # The optional ^C allows us to catch keyboard interrupt signals.
            (r'^(\^C)?(Traceback.*\n)',
             bygroups(Text, Generic.Traceback), 'intb'),
            # SyntaxError starts with this.
            (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            (r'^( File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            # Source line of a frame, highlighted with the Python 2 lexer.
            (r'^(    )(.+)(\n)',
             bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Text, Comment, Whitespace)),  # for doctests...
            # "ExcType: message" line ends the traceback.
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            (r'^([a-zA-Z_]\w*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop')
        ],
        'marker': [
            # For syntax errors.
            (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
            default('#pop'),
        ],
    }
+
+
+class CythonLexer(RegexLexer):
+ """
+ For Pyrex and Cython source code.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Cython'
+ url = 'http://cython.org'
+ aliases = ['cython', 'pyx', 'pyrex']
+ filenames = ['*.pyx', '*.pxd', '*.pxi']
+ mimetypes = ['text/x-cython', 'application/x-cython']
+
+ tokens = {
+ 'root': [
+ (r'\n', Whitespace),
+ (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
+ (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
+ (r'[^\S\n]+', Text),
+ (r'#.*$', Comment),
+ (r'[]{}:(),;[]', Punctuation),
+ (r'\\\n', Whitespace),
+ (r'\\', Text),
+ (r'(in|is|and|or|not)\b', Operator.Word),
+ (r'(<)([a-zA-Z0-9.?]+)(>)',
+ bygroups(Punctuation, Keyword.Type, Punctuation)),
+ (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
+ (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
+ bygroups(Keyword, Number.Integer, Operator, Name, Operator,
+ Name, Punctuation)),
+ include('keywords'),
+ (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
+ (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
+ # (should actually start a block with only cdefs)
+ (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
+ (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
+ (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
+ (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
+ include('builtins'),
+ include('backtick'),
+ ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
+ ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
+ ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
+ ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
+ ('[uU]?"""', String, combined('stringescape', 'tdqs')),
+ ("[uU]?'''", String, combined('stringescape', 'tsqs')),
+ ('[uU]?"', String, combined('stringescape', 'dqs')),
+ ("[uU]?'", String, combined('stringescape', 'sqs')),
+ include('name'),
+ include('numbers'),
+ ],
+ 'keywords': [
+ (words((
+ 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
+ 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
+ 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
+ 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
+ Keyword),
+ (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
+ ],
+ 'builtins': [
+ (words((
+ '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
+ 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
+ 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
+ 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
+ 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
+ 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
+ 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
+ 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
+ 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
+ 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
+ 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
+ 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Builtin),
+ (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
+ r')\b', Name.Builtin.Pseudo),
+ (words((
+ 'ArithmeticError', 'AssertionError', 'AttributeError',
+ 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
+ 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
+ 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
+ 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
+ 'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
+ 'OSError', 'OverflowError', 'OverflowWarning',
+ 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
+ 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
+ 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
+ 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
+ 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
+ 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Exception),
+ ],
+ 'numbers': [
+ (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+ (r'0\d+', Number.Oct),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (r'\d+L', Number.Integer.Long),
+ (r'\d+', Number.Integer)
+ ],
+ 'backtick': [
+ ('`.*?`', String.Backtick),
+ ],
+ 'name': [
+ (r'@\w+', Name.Decorator),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'funcname': [
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop')
+ ],
+ 'cdef': [
+ (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
+ (r'(struct|enum|union|class)\b', Keyword),
+ (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
+ bygroups(Name.Function, Text), '#pop'),
+ (r'([a-zA-Z_]\w*)(\s*)(,)',
+ bygroups(Name.Function, Text, Punctuation)),
+ (r'from\b', Keyword, '#pop'),
+ (r'as\b', Keyword),
+ (r':', Punctuation, '#pop'),
+ (r'(?=["\'])', Text, '#pop'),
+ (r'[a-zA-Z_]\w*', Keyword.Type),
+ (r'.', Text),
+ ],
+ 'classname': [
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+ ],
+ 'import': [
+ (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
+ (r'[a-zA-Z_][\w.]*', Name.Namespace),
+ (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
+ default('#pop') # all else: go back
+ ],
+ 'fromimport': [
+ (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
+ (r'[a-zA-Z_.][\w.]*', Name.Namespace),
+ # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
+ default('#pop'),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings': [
+ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ (r'[^\\\'"%\n]+', String),
+ # quotes, percents and backslashes must be parsed one at a time
+ (r'[\'"\\]', String),
+ # unhandled string formatting sign
+ (r'%', String)
+ # newlines are an error (use "nl" state)
+ ],
+ 'nl': [
+ (r'\n', String)
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ }
+
+
class DgLexer(RegexLexer):
    """
    Lexer for dg,
    a functional and object-oriented programming language
    running on the CPython 3 VM.

    .. versionadded:: 1.6
    """
    name = 'dg'
    aliases = ['dg']
    filenames = ['*.dg']
    mimetypes = ['text/x-dg']

    # NOTE: rule order inside each state is significant — earlier rules
    # win, so e.g. floats must be tried before plain integers.
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment.Single),

            # Numeric literals (Python-like; optional sign, trailing
            # ``j`` marks an imaginary number).
            (r'(?i)0b[01]+', Number.Bin),
            (r'(?i)0o[0-7]+', Number.Oct),
            (r'(?i)0x[0-9a-f]+', Number.Hex),
            (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
            (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
            (r'(?i)[+-]?[0-9]+j?', Number.Integer),

            # String literals; ``b``/``r`` prefixes in either case,
            # ``br`` combined. The 'string' state handles %-interpolation.
            (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
            (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
            (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
            (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),

            # A backtick-quoted name is used as an infix operator.
            (r"`\w+'*`", Operator),
            (r'\b(and|in|is|or|where)\b', Operator.Word),
            (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),

            # Built-in type constructors; primes (') are legal at the
            # end of dg identifiers, hence the escaped quotes below and
            # the (?![\'\w]) suffix instead of a plain \b.
            (words((
                'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
                'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
                'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
                'super', 'tuple', 'tuple\'', 'type'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            # Built-in functions.
            (words((
                '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
                'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
                'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
                'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
                'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
                'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
                'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
                'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
             Name.Builtin.Pseudo),

            # Anything ending in Error/Exception/Warning looks like an
            # exception class; a few well-known ones are listed explicitly.
            (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
             Name.Exception),
            (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
             r"SystemExit)(?!['\w])", Name.Exception),

            (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
             r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),

            # ALL_CAPS names are plain names; other capitalized names
            # are treated as type names.
            (r"[A-Z_]+'*(?!['\w])", Name),
            (r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
            (r"\w+'*", Name),

            (r'[()]', Punctuation),
            # Any character not matched above is an error.
            (r'.', Error),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'string': [
            # printf-style interpolation, e.g. %(name)06.2f
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String),
            (r'\n', String)
        ],
        # Closing-delimiter states; combined() stacks one of these on
        # top of 'string' and 'stringescape' above.
        'dqs': [
            (r'"', String, '#pop')
        ],
        'sqs': [
            (r"'", String, '#pop')
        ],
        'tdqs': [
            (r'"""', String, '#pop')
        ],
        'tsqs': [
            (r"'''", String, '#pop')
        ],
    }
+
+
class NumPyLexer(PythonLexer):
    """
    A Python lexer recognizing Numerical Python builtins.

    .. versionadded:: 0.10
    """

    name = 'NumPy'
    url = 'https://numpy.org/'
    aliases = ['numpy']

    # override the mimetypes to not inherit them from python
    mimetypes = []
    filenames = []

    # NumPy API names: any plain Name token whose text appears here is
    # re-emitted as Keyword.Pseudo (see get_tokens_unprocessed below).
    EXTRA_KEYWORDS = {
        'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
        'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
        'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
        'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
        'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
        'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
        'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
        'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
        'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
        'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
        'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
        'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
        'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
        'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
        'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
        'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
        'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
        'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
        'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
        'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
        'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
        'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
        'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
        'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
        'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
        'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
        'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
        'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
        'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
        'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
        'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
        'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
        'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
        'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
        'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
        'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
        'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
        'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
        'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
        'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
        'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
        'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
        'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
        'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
        'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
        'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
        'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
        'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
        'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
        'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
        'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
        'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
        'set_numeric_ops', 'set_printoptions', 'set_string_function',
        'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
        'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
        'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
        'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
        'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
        'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
        'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
        'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
        'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
        'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
        'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
    }

    def get_tokens_unprocessed(self, text):
        """Tokenize as Python, then re-tag NumPy builtins.

        Tokens produced by :class:`PythonLexer` pass through unchanged,
        except that a plain ``Name`` whose text is in ``EXTRA_KEYWORDS``
        is emitted as ``Keyword.Pseudo``.
        """
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value

    def analyse_text(text):
        # Only claim input that both looks like Python (shebang or an
        # import statement early on) and mentions numpy in the first
        # 1000 characters.
        ltext = text[:1000]
        return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
                'import ' in ltext) \
            and ('import numpy' in ltext or 'from numpy import' in ltext)
diff --git a/pygments/lexers/q.py b/pygments/lexers/q.py
new file mode 100644
index 0000000..b445ee8
--- /dev/null
+++ b/pygments/lexers/q.py
@@ -0,0 +1,188 @@
+"""
+ pygments.lexers.q
+ ~~~~~~~~~~~~~~~~~
+
+ Lexer for the Q programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include, bygroups, inherit
+from pygments.token import Comment, Name, Number, Operator, Punctuation, \
+ String, Whitespace, Literal, Generic
+
+__all__ = ["KLexer", "QLexer"]
+
+
class KLexer(RegexLexer):
    """
    For `K <https://code.kx.com/>`_ source code.

    .. versionadded:: 2.12
    """

    name = "K"
    aliases = ["k"]
    filenames = ["*.k"]

    tokens = {
        "whitespace": [
            # hashbang script
            (r"^#!.*", Comment.Hashbang),
            # Comments: a lone "/" on its own line opens a multi-line
            # comment (closed by a lone "\" — see the 'comments' state);
            # "/" preceded by whitespace starts a trailing comment.
            (r"^/\s*\n", Comment.Multiline, "comments"),
            (r"(?<!\S)/.*", Comment.Single),
            # Whitespace
            (r"\s+", Whitespace),
            # Strings
            (r"\"", String.Double, "strings"),
        ],
        "root": [
            include("whitespace"),
            include("keywords"),
            include("declarations"),
        ],
        "keywords": [
            (words(("abs", "acos", "asin", "atan", "avg", "bin",
                    "binr", "by", "cor", "cos", "cov", "dev",
                    "delete", "div", "do", "enlist", "exec", "exit",
                    "exp", "from", "getenv", "hopen", "if", "in",
                    "insert", "last", "like", "log", "max", "min",
                    "prd", "select", "setenv", "sin", "sqrt", "ss",
                    "sum", "tan", "update", "var", "wavg", "while",
                    "within", "wsum", "xexp"),
                   suffix=r"\b"), Operator.Word),
        ],
        "declarations": [
            # Timing
            (r"^\\ts?", Comment.Preproc),
            (r"^(\\\w\s+[^/\n]*?)(/.*)",
             bygroups(Comment.Preproc, Comment.Single)),
            # Generic System Commands
            (r"^\\\w.*", Comment.Preproc),
            # Prompt
            (r"^[a-zA-Z]\)", Generic.Prompt),
            # Function Names: an identifier assigned a lambda, where the
            # ":" may carry a one-character operator prefix (e.g. "+:").
            (r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)(\s*)(\{)",
             bygroups(Name.Function, Whitespace, Operator, Whitespace, Punctuation),
             "functions"),
            # Variable Names (same assignment shape, no lambda)
            (r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)",
             bygroups(Name.Variable, Whitespace, Operator)),
            # Functions
            (r"\{", Punctuation, "functions"),
            # Parentheses
            (r"\(", Punctuation, "parentheses"),
            # Brackets
            (r"\[", Punctuation, "brackets"),
            # Errors
            (r"'`([a-zA-Z][\w.]*)?", Name.Exception),
            # File Symbols
            (r"`:([a-zA-Z/][\w./]*)?", String.Symbol),
            # Symbols
            (r"`([a-zA-Z][\w.]*)?", String.Symbol),
            # Numbers
            include("numbers"),
            # Variable Names
            (r"[a-zA-Z][\w.]*", Name),
            # Operators
            (r"[-=+*#$%@!~^&:.,<>'\\|/?_]", Operator),
            # Punctuation
            (r";", Punctuation),
        ],
        # The three bracketing states reuse all root rules and add only
        # their closing delimiter.
        "functions": [
            include("root"),
            (r"\}", Punctuation, "#pop"),
        ],
        "parentheses": [
            include("root"),
            (r"\)", Punctuation, "#pop"),
        ],
        "brackets": [
            include("root"),
            (r"\]", Punctuation, "#pop"),
        ],
        "numbers": [
            # Binary Values
            (r"[01]+b", Number.Bin),
            # Nulls/Infinities
            (r"0[nNwW][cefghijmndzuvtp]?", Number),
            # Timestamps
            ((r"(?:[0-9]{4}[.][0-9]{2}[.][0-9]{2}|[0-9]+)"
              "D(?:[0-9](?:[0-9](?::[0-9]{2}"
              "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)?"), Literal.Date),
            # Datetimes
            ((r"[0-9]{4}[.][0-9]{2}"
              "(?:m|[.][0-9]{2}(?:T(?:[0-9]{2}:[0-9]{2}"
              "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)"), Literal.Date),
            # Times
            (r"[0-9]{2}:[0-9]{2}(?::[0-9]{2}(?:[.][0-9]{1,3})?)?",
             Literal.Date),
            # GUIDs
            (r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
             Number.Hex),
            # Byte Vectors
            (r"0x[0-9a-fA-F]+", Number.Hex),
            # Floats
            (r"([0-9]*[.]?[0-9]+|[0-9]+[.]?[0-9]*)[eE][+-]?[0-9]+[ef]?",
             Number.Float),
            (r"([0-9]*[.][0-9]+|[0-9]+[.][0-9]*)[ef]?", Number.Float),
            (r"[0-9]+[ef]", Number.Float),
            # Characters
            (r"[0-9]+c", Number),
            # Integers
            (r"[0-9]+[ihtuv]", Number.Integer),
            # Long Integers
            (r"[0-9]+[jnp]?", Number.Integer.Long),
        ],
        "comments": [
            # Inside a multi-line comment; a line consisting of a single
            # backslash terminates it.
            (r"[^\\]+", Comment.Multiline),
            (r"^\\", Comment.Multiline, "#pop"),
            (r"\\", Comment.Multiline),
        ],
        "strings": [
            (r'[^"\\]+', String.Double),
            (r"\\.", String.Escape),
            (r'"', String.Double, "#pop"),
        ],
    }
+
+
class QLexer(KLexer):
    """
    For `Q <https://code.kx.com/>`_ source code.

    .. versionadded:: 2.12
    """

    name = "Q"
    aliases = ["q"]
    filenames = ["*.q"]

    tokens = {
        "root": [
            # Q's builtin vocabulary is tried first; anything else falls
            # through to the inherited K rules (via `inherit`).
            (words(("aj", "aj0", "ajf", "ajf0", "all", "and", "any", "asc",
                    "asof", "attr", "avgs", "ceiling", "cols", "count", "cross",
                    "csv", "cut", "deltas", "desc", "differ", "distinct", "dsave",
                    "each", "ej", "ema", "eval", "except", "fby", "fills", "first",
                    "fkeys", "flip", "floor", "get", "group", "gtime", "hclose",
                    "hcount", "hdel", "hsym", "iasc", "idesc", "ij", "ijf",
                    "inter", "inv", "key", "keys", "lj", "ljf", "load", "lower",
                    "lsq", "ltime", "ltrim", "mavg", "maxs", "mcount", "md5",
                    "mdev", "med", "meta", "mins", "mmax", "mmin", "mmu", "mod",
                    "msum", "neg", "next", "not", "null", "or", "over", "parse",
                    "peach", "pj", "prds", "prior", "prev", "rand", "rank", "ratios",
                    "raze", "read0", "read1", "reciprocal", "reval", "reverse",
                    "rload", "rotate", "rsave", "rtrim", "save", "scan", "scov",
                    "sdev", "set", "show", "signum", "ssr", "string", "sublist",
                    "sums", "sv", "svar", "system", "tables", "til", "trim", "txf",
                    "type", "uj", "ujf", "ungroup", "union", "upper", "upsert",
                    "value", "view", "views", "vs", "where", "wj", "wj1", "ww",
                    "xasc", "xbar", "xcol", "xcols", "xdesc", "xgroup", "xkey",
                    "xlog", "xprev", "xrank"),
                   suffix=r"\b"), Name.Builtin,
             ),
            inherit,
        ],
    }
diff --git a/pygments/lexers/qlik.py b/pygments/lexers/qlik.py
new file mode 100644
index 0000000..bb4defd
--- /dev/null
+++ b/pygments/lexers/qlik.py
@@ -0,0 +1,117 @@
+"""
+ pygments.lexers.qlik
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the qlik scripting language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text
+from pygments.lexers._qlik_builtins import OPERATORS_LIST, STATEMENT_LIST, \
+ SCRIPT_FUNCTIONS, CONSTANT_LIST
+
+__all__ = ["QlikLexer"]
+
+
class QlikLexer(RegexLexer):
    """
    Lexer for qlik code, including .qvs files

    .. versionadded:: 2.12
    """

    name = "Qlik"
    aliases = ["qlik", "qlikview", "qliksense", "qlikscript"]
    filenames = ["*.qvs", "*.qvw"]

    # Qlik script is case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        # Handle multi-line comments
        "comment": [
            (r"\*/", Comment.Multiline, "#pop"),
            (r"[^*]+", Comment.Multiline),
            # FIX: a lone "*" that is not part of "*/" (e.g. in
            # "/* 2*3 */") previously matched no rule and produced an
            # Error token; consume it as comment text instead. Must come
            # after the "*/" rule so comment termination still wins.
            (r"\*", Comment.Multiline),
        ],
        # Handle numbers
        "numerics": [
            (r"\b\d+\.\d+(e\d+)?[fd]?\b", Number.Float),
            (r"\b\d+\b", Number.Integer),
        ],
        # Handle variable interpolation, e.g. $(variableName)
        "interp": [
            (
                r"(\$\()(\w+)(\))",
                bygroups(String.Interpol, Name.Variable, String.Interpol),
            ),
        ],
        # Handle single-quoted strings (may contain interpolation)
        "string": [
            (r"'", String, "#pop"),
            include("interp"),
            (r"[^'$]+", String),
            (r"\$", String),
        ],
        # Body of a let/set statement, up to the terminating semicolon.
        "assignment": [
            (r";", Punctuation, "#pop"),
            include("root"),
        ],
        # Field/file names quoted with double quotes or square brackets.
        "field_name_quote": [
            (r'"', String.Symbol, "#pop"),
            include("interp"),
            (r"[^\"$]+", String.Symbol),
            (r"\$", String.Symbol),
        ],
        "field_name_bracket": [
            (r"\]", String.Symbol, "#pop"),
            include("interp"),
            (r"[^\]$]+", String.Symbol),
            (r"\$", String.Symbol),
        ],
        # Inside a builtin-function call, until the closing parenthesis.
        "function": [(r"\)", Punctuation, "#pop"), include("root")],
        "root": [
            # Whitespace and comments
            (r"\s+", Text.Whitespace),
            (r"/\*", Comment.Multiline, "comment"),
            (r"//.*\n", Comment.Single),
            # variable assignment
            (r"(let|set)(\s+)", bygroups(Keyword.Declaration, Text.Whitespace),
             "assignment"),
            # Word operators
            (words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"),
             Operator.Word),
            # Statements
            (words(STATEMENT_LIST, suffix=r"\b"), Keyword),
            # Table names
            (r"[a-z]\w*:", Keyword.Declaration),
            # Constants
            (words(CONSTANT_LIST, suffix=r"\b"), Keyword.Constant),
            # Functions
            (words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin,
             "function"),
            # interpolation - e.g. $(variableName)
            include("interp"),
            # Quotes denote a field/file name
            (r'"', String.Symbol, "field_name_quote"),
            # Square brackets denote a field/file name
            (r"\[", String.Symbol, "field_name_bracket"),
            # Strings
            (r"'", String, "string"),
            # Numbers
            include("numerics"),
            # Operator symbols
            (words(OPERATORS_LIST["symbols"]), Operator),
            # NOTE(review): this rule is unreachable — the (r"'", ...)
            # rule above already matches any single quote first.
            (r"'.+?'", String),
            # Words as text
            (r"\b\w+\b", Text),
            # Basic punctuation
            (r"[,;.()\\/]", Punctuation),
        ],
    }
diff --git a/pygments/lexers/qvt.py b/pygments/lexers/qvt.py
new file mode 100644
index 0000000..07d1818
--- /dev/null
+++ b/pygments/lexers/qvt.py
@@ -0,0 +1,151 @@
+"""
+ pygments.lexers.qvt
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for QVT Operational language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, combined, default, \
+ words
+from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \
+ Name, String, Number
+
+__all__ = ['QVToLexer']
+
+
class QVToLexer(RegexLexer):
    """
    For the `QVT Operational Mapping language <http://www.omg.org/spec/QVT/1.1/>`_.

    Reference for implementing this: «Meta Object Facility (MOF) 2.0
    Query/View/Transformation Specification», Version 1.1 - January 2011
    (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in
    particular.

    Notable tokens assignments:

    - Name.Class is assigned to the identifier following any of the following
      keywords: metamodel, class, exception, primitive, enum, transformation
      or library

    - Name.Function is assigned to the names of mappings and queries

    - Name.Builtin.Pseudo is assigned to the pre-defined variables 'this',
      'self' and 'result'.
    """
    # With obvious borrowings & inspiration from the Java, Python and C lexers

    name = 'QVTO'
    aliases = ['qvto', 'qvt']
    filenames = ['*.qvto']

    tokens = {
        'root': [
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # Line comments ('--' or '//'), with optional 'directive:'
            # marker highlighted as a preprocessor token.
            (r'(--|//)(\s*)(directive:)?(.*)$',
             bygroups(Comment, Comment, Comment.Preproc, Comment)),
            # Uncomment the following if you want to distinguish between
            # '/*' and '/**', à la javadoc
            # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'\\\n', Text),
            (r'(and|not|or|xor|##?)\b', Operator.Word),
            (r'(:{1,2}=|[-+]=)\b', Operator.Word),
            (r'(@|<<|>>)\b', Keyword),  # stereotypes
            (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator),
            (r'[]{}:(),;[]', Punctuation),
            (r'(true|false|unlimited|null)\b', Keyword.Constant),
            (r'(this|self|result)\b', Name.Builtin.Pseudo),
            (r'(var)\b', Keyword.Declaration),
            (r'(from|import)\b', Keyword.Namespace, 'fromimport'),
            (r'(metamodel|class|exception|primitive|enum|transformation|'
             r'library)(\s+)(\w+)',
             bygroups(Keyword.Word, Text, Name.Class)),
            # NOTE(review): unreachable — 'exception' is already matched
            # by the rule above, which tags the identifier Name.Class
            # (consistent with the class docstring); kept as-is.
            (r'(exception)(\s+)(\w+)',
             bygroups(Keyword.Word, Text, Name.Exception)),
            (r'(main)\b', Name.Function),
            (r'(mapping|helper|query)(\s+)',
             bygroups(Keyword.Declaration, Text), 'operation'),
            (r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'),
            (r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b',
             Keyword.Type),
            include('keywords'),
            ('"', String, combined('stringescape', 'dqs')),
            ("'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
            # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)',
            # bygroups(Text, Text, Text)),
        ],

        'fromimport': [
            (r'(?:[ \t]|\\\n)+', Text),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            default('#pop'),
        ],

        # After 'mapping'/'helper'/'query': pick out the operation name
        # (the part just before the opening parenthesis).
        'operation': [
            (r'::', Text),
            (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()',
             bygroups(Text, Name.Function, Text, Punctuation), '#pop')
        ],

        'assert': [
            (r'(warning|error|fatal)\b', Keyword, '#pop'),
            default('#pop'),  # all else: go back
        ],

        'keywords': [
            (words((
                'abstract', 'access', 'any', 'assert', 'blackbox', 'break',
                'case', 'collect', 'collectNested', 'collectOne', 'collectselect',
                'collectselectOne', 'composes', 'compute', 'configuration',
                'constructor', 'continue', 'datatype', 'default', 'derived',
                'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except',
                'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if',
                'implies', 'in', 'inherits', 'init', 'inout', 'intermediate',
                'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn',
                'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map',
                'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out',
                'package', 'population', 'property', 'raise', 'readonly',
                'references', 'refines', 'reject', 'resolve', 'resolveIn',
                'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne',
                'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef',
                'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect',
                'xmap', 'xselect'), suffix=r'\b'), Keyword),
        ],

        # There is no need to distinguish between String.Single and
        # String.Double: 'strings' is factorised for 'dqs' and 'sqs'
        'strings': [
            (r'[^\\\'"\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
        ],
        'stringescape': [
            (r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape)
        ],
        'dqs': [  # double-quoted string
            (r'"', String, '#pop'),
            (r'\\\\|\\"', String.Escape),
            include('strings')
        ],
        'sqs': [  # single-quoted string
            (r"'", String, '#pop'),
            (r"\\\\|\\'", String.Escape),
            include('strings')
        ],
        'name': [
            (r'[a-zA-Z_]\w*', Name),
        ],
        # numbers: excerpt taken from the python lexer
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer)
        ],
    }
diff --git a/pygments/lexers/r.py b/pygments/lexers/r.py
new file mode 100644
index 0000000..02c61d2
--- /dev/null
+++ b/pygments/lexers/r.py
@@ -0,0 +1,190 @@
+"""
+ pygments.lexers.r
+ ~~~~~~~~~~~~~~~~~
+
+ Lexers for the R/S languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, do_insertions
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
+
+
# Matches one line of input including its trailing newline, so console
# transcripts can be walked line by line with finditer().
line_re = re.compile('.*?\n')
+
+
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.

    Input lines (those starting with ``>`` or ``+``) are buffered and
    highlighted through :class:`SLexer`, with the two-character prompts
    re-inserted as ``Generic.Prompt``; all other lines are R output.
    """

    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        slexer = SLexer(**self.options)

        code_buffer = ''
        prompt_insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(('>', '+')):
                # Record the prompt for later re-insertion and buffer
                # the rest of the line as R source.
                prompt_insertions.append(
                    (len(code_buffer), [(0, Generic.Prompt, line[:2])]))
                code_buffer += line[2:]
                continue

            # An output line: first flush any buffered input through the
            # S lexer, weaving the recorded prompts back in.
            if code_buffer:
                yield from do_insertions(
                    prompt_insertions,
                    slexer.get_tokens_unprocessed(code_buffer))
                code_buffer = ''
                prompt_insertions = []
            yield match.start(), Generic.Output, line

        # A transcript ending on input (no trailing output) still needs
        # its final buffer flushed.
        if code_buffer:
            yield from do_insertions(
                prompt_insertions,
                slexer.get_tokens_unprocessed(code_buffer))
+
+
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    .. versionadded:: 0.10
    """

    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']

    # A backtick-quoted name (with escapes), a regular identifier which
    # may start with a dot, or a lone dot.
    valid_name = r'`[^`\\]*(?:\\.[^`\\]*)*`|(?:[a-zA-Z]|\.[A-Za-z_.])[\w.]*|\.'
    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (valid_name, Name),
        ],
        'punctuation': [
            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
        ],
        'keywords': [
            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
             r'(?![\w.])',
             Keyword.Reserved),
        ],
        'operators': [
            # Assignment arrows (<-, <<-, ->, ->>), comparisons and the
            # remaining operators; %...% covers user-defined infix ops.
            (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator),
        ],
        'builtin_symbols': [
            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
             r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
             r'(?![\w.])',
             Keyword.Constant),
            (r'(T|F)\b', Name.Builtin.Pseudo),
        ],
        'numbers': [
            # hex number
            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
            # decimal number
            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
             Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Whitespace),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('valid_name'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
        ],
        'root': [
            # calls: a name directly followed by '(' is a function call.
            (r'(%s)\s*(?=\()' % valid_name, Name.Function),
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            # (r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        # 'block': [
        #    include('statements'),
        #    ('\{', Punctuation, '#push'),
        #    ('\}', Punctuation, '#pop')
        # ],
        'string_squote': [
            (r'([^\'\\]|\\.)*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'([^"\\]|\\.)*"', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # The assignment arrow '<-' (not '<--') after a name/bracket is a
        # weak but distinctive hint of R; returns None (no claim)
        # otherwise, which Pygments treats as 0.
        if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
            return 0.11
+
+
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <http://developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    tokens = {
        'root': [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else
            (r'[^\\%\n{}]+', Text),
            (r'.', Text),
        ]
    }
diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py
new file mode 100644
index 0000000..3919a9b
--- /dev/null
+++ b/pygments/lexers/rdf.py
@@ -0,0 +1,462 @@
+"""
+ pygments.lexers.rdf
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for semantic web and RDF query languages and markup.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, default
+from pygments.token import Keyword, Punctuation, String, Number, Operator, \
+ Generic, Whitespace, Name, Literal, Comment, Text
+
+__all__ = ['SparqlLexer', 'TurtleLexer', 'ShExCLexer']
+
+
class SparqlLexer(RegexLexer):
    """
    Lexer for `SPARQL <https://www.w3.org/TR/sparql11-query/>`_ query language.

    .. versionadded:: 2.0
    """
    name = 'SPARQL'
    aliases = ['sparql']
    filenames = ['*.rq', '*.sparql']
    mimetypes = ['application/sparql-query']

    # character group definitions ::
    # Each *_GRP constant below is the *body* of a regex character class
    # (the part between the brackets), mirroring the character ranges of
    # the SPARQL 1.1 grammar's terminal productions.

    PN_CHARS_BASE_GRP = ('a-zA-Z'
                         '\u00c0-\u00d6'
                         '\u00d8-\u00f6'
                         '\u00f8-\u02ff'
                         '\u0370-\u037d'
                         '\u037f-\u1fff'
                         '\u200c-\u200d'
                         '\u2070-\u218f'
                         '\u2c00-\u2fef'
                         '\u3001-\ud7ff'
                         '\uf900-\ufdcf'
                         '\ufdf0-\ufffd')

    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')

    PN_CHARS_GRP = (PN_CHARS_U_GRP +
                    r'\-' +
                    r'0-9' +
                    '\u00b7' +
                    '\u0300-\u036f' +
                    '\u203f-\u2040')

    HEX_GRP = '0-9A-Fa-f'

    # Characters that may appear backslash-escaped inside the local part
    # of a prefixed name (see PN_LOCAL_ESC below).
    PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'

    # terminal productions ::

    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'

    PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'

    PN_CHARS = '[' + PN_CHARS_GRP + ']'

    HEX = '[' + HEX_GRP + ']'

    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'

    # IRI between angle brackets; control chars, spaces and a few
    # reserved characters are excluded.
    IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'

    BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
                       '.]*' + PN_CHARS + ')?'

    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'

    VARNAME = '[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
              '0-9\u00b7\u0300-\u036f\u203f-\u2040]*'

    PERCENT = '%' + HEX + HEX

    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS

    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'

    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
                PN_CHARS_GRP + ':]|' + PLX + '))?')

    EXPONENT = r'[eE][+-]?\d+'

    # Lexer token definitions ::
    # Rule order matters: earlier rules win, so keywords and function
    # names must be tried before the generic prefixed-name rule.

    tokens = {
        'root': [
            (r'\s+', Text),
            # keywords ::
            (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|'
             r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
             r'offset|values|bindings|load|into|clear|drop|create|add|move|copy|'
             r'insert\s+data|delete\s+data|delete\s+where|with|delete|insert|'
             r'using\s+named|using|graph|default|named|all|optional|service|'
             r'silent|bind|undef|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
            # the shorthand rdf:type predicate
            (r'(a)\b', Keyword),
            # IRIs ::
            ('(' + IRIREF + ')', Name.Label),
            # blank nodes ::
            ('(' + BLANK_NODE_LABEL + ')', Name.Label),
            # variables ::
            ('[?$]' + VARNAME, Name.Variable),
            # prefixed names ::
            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
             bygroups(Name.Namespace, Punctuation, Name.Tag)),
            # function names ::
            (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
             r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
             r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
             r'hours|minutes|seconds|timezone|tz|now|uuid|struuid|md5|sha1|sha256|sha384|'
             r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
             r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
             r'count|sum|min|max|avg|sample|group_concat|separator)\b',
             Name.Function),
            # boolean literals ::
            (r'(true|false)', Keyword.Constant),
            # double literals ::
            (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
            # decimal literals ::
            (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
            # integer literals ::
            (r'[+\-]?\d+', Number.Integer),
            # operators ::
            (r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
            # punctuation characters ::
            (r'[(){}.;,:^\[\]]', Punctuation),
            # line comments ::
            (r'#[^\n]*', Comment),
            # strings ::
            (r'"""', String, 'triple-double-quoted-string'),
            (r'"', String, 'single-double-quoted-string'),
            (r"'''", String, 'triple-single-quoted-string'),
            (r"'", String, 'single-single-quoted-string'),
        ],
        # Each string state hands off to 'end-of-string' on the closing
        # quote and to 'string-escape' on a backslash.
        'triple-double-quoted-string': [
            (r'"""', String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-double-quoted-string': [
            (r'"', String, 'end-of-string'),
            (r'[^"\\\n]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'triple-single-quoted-string': [
            (r"'''", String, 'end-of-string'),
            (r'[^\\]+', String),
            # NOTE(review): the backslash is tokenized as String.Escape
            # here but as plain String in the sibling string states —
            # looks like an unintentional inconsistency; confirm.
            (r'\\', String.Escape, 'string-escape'),
        ],
        'single-single-quoted-string': [
            (r"'", String, 'end-of-string'),
            (r"[^'\\\n]+", String),
            (r'\\', String, 'string-escape'),
        ],
        'string-escape': [
            # consume one \uXXXX / \UXXXXXXXX escape, or any single
            # escaped char, then return to the enclosing string state
            (r'u' + HEX + '{4}', String.Escape, '#pop'),
            (r'U' + HEX + '{8}', String.Escape, '#pop'),
            (r'.', String.Escape, '#pop'),
        ],
        # Optional language tag / datatype annotation after a string.
        # '#pop:2' leaves both this state and the string state.
        'end-of-string': [
            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
             bygroups(Operator, Name.Function), '#pop:2'),
            (r'\^\^', Operator, '#pop:2'),
            default('#pop:2'),
        ],
    }
+
+
class TurtleLexer(RegexLexer):
    """
    Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.

    .. versionadded:: 2.1
    """
    name = 'Turtle'
    aliases = ['turtle']
    filenames = ['*.ttl']
    mimetypes = ['text/turtle', 'application/x-turtle']

    # character group definitions ::
    # Each *_GRP constant is the body of a regex character class,
    # mirroring the terminal productions of the Turtle grammar.
    PN_CHARS_BASE_GRP = ('a-zA-Z'
                         '\u00c0-\u00d6'
                         '\u00d8-\u00f6'
                         '\u00f8-\u02ff'
                         '\u0370-\u037d'
                         '\u037f-\u1fff'
                         '\u200c-\u200d'
                         '\u2070-\u218f'
                         '\u2c00-\u2fef'
                         '\u3001-\ud7ff'
                         '\uf900-\ufdcf'
                         '\ufdf0-\ufffd')

    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')

    PN_CHARS_GRP = (PN_CHARS_U_GRP +
                    r'\-' +
                    r'0-9' +
                    '\u00b7' +
                    '\u0300-\u036f' +
                    '\u203f-\u2040')

    PN_CHARS = '[' + PN_CHARS_GRP + ']'

    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'

    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'

    HEX_GRP = '0-9A-Fa-f'

    HEX = '[' + HEX_GRP + ']'

    PERCENT = '%' + HEX + HEX

    # characters that may appear backslash-escaped in a local name
    PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'

    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'

    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS

    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'

    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
                PN_CHARS_GRP + ':]|' + PLX + '))?')

    # Pre-built sub-patterns interpolated (via %) into the rules below.
    patterns = {
        'PNAME_NS': r'((?:[a-zA-Z][\w-]*)?\:)',  # Simplified character range
        'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
    }

    tokens = {
        'root': [
            (r'\s+', Text),

            # Base / prefix
            (r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
             bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
                      Punctuation)),
            (r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
                      Name.Variable, Whitespace, Punctuation)),

            # The shorthand predicate 'a'
            (r'(?<=\s)a(?=\s)', Keyword.Type),

            # IRIREF
            (r'%(IRIREF)s' % patterns, Name.Variable),

            # PrefixedName
            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
             bygroups(Name.Namespace, Punctuation, Name.Tag)),

            # Comment
            (r'#[^\n]+', Comment),

            (r'\b(true|false)\b', Literal),
            # Scientific notation must be tried before the plain decimal
            # rule, otherwise "1.5E3" is split into "1.5" plus a stray
            # "E3".  The group is also now a proper non-capturing group:
            # the original "(:?\.\d+)?" was a typo that matched an
            # optional literal colon instead of grouping.
            (r'[+\-]?\d*(?:\.\d+)?E[+\-]?\d+', Number.Float),
            (r'[+\-]?\d*\.\d+', Number.Float),
            (r'[+\-]?\d+', Number.Integer),
            (r'[\[\](){}.;,:^]', Punctuation),

            (r'"""', String, 'triple-double-quoted-string'),
            (r'"', String, 'single-double-quoted-string'),
            (r"'''", String, 'triple-single-quoted-string'),
            (r"'", String, 'single-single-quoted-string'),
        ],
        # Each string state hands off to 'end-of-string' on the closing
        # quote and to 'string-escape' on a backslash.
        'triple-double-quoted-string': [
            (r'"""', String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-double-quoted-string': [
            (r'"', String, 'end-of-string'),
            (r'[^"\\\n]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'triple-single-quoted-string': [
            (r"'''", String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-single-quoted-string': [
            (r"'", String, 'end-of-string'),
            (r"[^'\\\n]+", String),
            (r'\\', String, 'string-escape'),
        ],
        'string-escape': [
            # consume one escaped character, back to the string state
            (r'.', String, '#pop'),
        ],
        # Optional language tag / datatype IRI after a string literal;
        # '#pop:2' leaves both this state and the string state.
        'end-of-string': [
            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
             bygroups(Operator, Generic.Emph), '#pop:2'),

            (r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),

            default('#pop:2'),

        ],
    }

    # Turtle and Tera Term macro files share the same file extension
    # but each has a recognizable and distinct syntax.
    def analyse_text(text):
        """Return 0.80 when a Turtle @base/@prefix directive is found."""
        for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '):
            if re.search(r'^\s*%s' % t, text):
                return 0.80
+
+
class ShExCLexer(RegexLexer):
    """
    Lexer for `ShExC <https://shex.io/shex-semantics/#shexc>`_ shape expressions language syntax.
    """
    name = 'ShExC'
    aliases = ['shexc', 'shex']
    filenames = ['*.shex']
    mimetypes = ['text/shex']

    # character group definitions ::
    # Each *_GRP constant is the body of a regex character class,
    # mirroring the terminal productions of the ShExC grammar.

    PN_CHARS_BASE_GRP = ('a-zA-Z'
                         '\u00c0-\u00d6'
                         '\u00d8-\u00f6'
                         '\u00f8-\u02ff'
                         '\u0370-\u037d'
                         '\u037f-\u1fff'
                         '\u200c-\u200d'
                         '\u2070-\u218f'
                         '\u2c00-\u2fef'
                         '\u3001-\ud7ff'
                         '\uf900-\ufdcf'
                         '\ufdf0-\ufffd')

    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')

    PN_CHARS_GRP = (PN_CHARS_U_GRP +
                    r'\-' +
                    r'0-9' +
                    '\u00b7' +
                    '\u0300-\u036f' +
                    '\u203f-\u2040')

    HEX_GRP = '0-9A-Fa-f'

    # characters that may appear backslash-escaped in a local name
    PN_LOCAL_ESC_CHARS_GRP = r"_~.\-!$&'()*+,;=/?#@%"

    # terminal productions ::

    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'

    PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'

    PN_CHARS = '[' + PN_CHARS_GRP + ']'

    HEX = '[' + HEX_GRP + ']'

    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'

    # \uXXXX / \UXXXXXXXX escapes, without the leading backslash
    UCHAR_NO_BACKSLASH = '(?:u' + HEX + '{4}|U' + HEX + '{8})'

    UCHAR = r'\\' + UCHAR_NO_BACKSLASH

    # IRI between angle brackets; may contain UCHAR escapes
    IRIREF = r'<(?:[^\x00-\x20<>"{}|^`\\]|' + UCHAR + ')*>'

    BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
                       '.]*' + PN_CHARS + ')?'

    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'

    PERCENT = '%' + HEX + HEX

    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS

    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'

    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
                PN_CHARS_GRP + ':]|' + PLX + '))?')

    EXPONENT = r'[eE][+-]?\d+'

    # Lexer token definitions ::
    # Rule order matters: keywords are tried before prefixed names.

    tokens = {
        'root': [
            (r'\s+', Text),
            # keywords ::
            (r'(?i)(base|prefix|start|external|'
             r'literal|iri|bnode|nonliteral|length|minlength|maxlength|'
             r'mininclusive|minexclusive|maxinclusive|maxexclusive|'
             r'totaldigits|fractiondigits|'
             r'closed|extra)\b', Keyword),
            # the shorthand rdf:type predicate
            (r'(a)\b', Keyword),
            # IRIs ::
            ('(' + IRIREF + ')', Name.Label),
            # blank nodes ::
            ('(' + BLANK_NODE_LABEL + ')', Name.Label),
            # prefixed names ::
            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?',
             bygroups(Name.Namespace, Punctuation, Name.Tag)),
            # boolean literals ::
            (r'(true|false)', Keyword.Constant),
            # double literals ::
            (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
            # decimal literals ::
            (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
            # integer literals ::
            (r'[+\-]?\d+', Number.Integer),
            # operators ::
            (r'[@|$&=*+?^\-~]', Operator),
            # operator keywords ::
            (r'(?i)(and|or|not)\b', Operator.Word),
            # punctuation characters ::
            (r'[(){}.;,:^\[\]]', Punctuation),
            # line comments ::
            (r'#[^\n]*', Comment),
            # strings ::
            (r'"""', String, 'triple-double-quoted-string'),
            (r'"', String, 'single-double-quoted-string'),
            (r"'''", String, 'triple-single-quoted-string'),
            (r"'", String, 'single-single-quoted-string'),
        ],
        # Each string state hands off to 'end-of-string' on the closing
        # quote and to 'string-escape' on a backslash.
        'triple-double-quoted-string': [
            (r'"""', String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-double-quoted-string': [
            (r'"', String, 'end-of-string'),
            (r'[^"\\\n]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'triple-single-quoted-string': [
            (r"'''", String, 'end-of-string'),
            (r'[^\\]+', String),
            # NOTE(review): tokenized as String.Escape here but as plain
            # String in the sibling states — likely unintentional; confirm.
            (r'\\', String.Escape, 'string-escape'),
        ],
        'single-single-quoted-string': [
            (r"'", String, 'end-of-string'),
            (r"[^'\\\n]+", String),
            (r'\\', String, 'string-escape'),
        ],
        'string-escape': [
            # consume one \uXXXX/\UXXXXXXXX escape or a single escaped
            # char, then return to the enclosing string state
            (UCHAR_NO_BACKSLASH, String.Escape, '#pop'),
            (r'.', String.Escape, '#pop'),
        ],
        # Optional language tag / datatype annotation after a string.
        # '#pop:2' leaves both this state and the string state.
        'end-of-string': [
            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
             bygroups(Operator, Name.Function), '#pop:2'),
            (r'\^\^', Operator, '#pop:2'),
            default('#pop:2'),
        ],
    }
diff --git a/pygments/lexers/rebol.py b/pygments/lexers/rebol.py
new file mode 100644
index 0000000..7d363dd
--- /dev/null
+++ b/pygments/lexers/rebol.py
@@ -0,0 +1,430 @@
+"""
+ pygments.lexers.rebol
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the REBOL and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Generic, Whitespace
+
+__all__ = ['RebolLexer', 'RedLexer']
+
+
class RebolLexer(RegexLexer):
    """
    A `REBOL <http://www.rebol.com/>`_ lexer.

    .. versionadded:: 1.1
    """
    name = 'REBOL'
    aliases = ['rebol']
    filenames = ['*.r', '*.r3', '*.reb']
    mimetypes = ['text/x-rebol']

    flags = re.IGNORECASE | re.MULTILINE

    # caret escape sequences of the form ^(1F) used inside strings,
    # chars and tags
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        # Classify a whole REBOL word and yield a single token for it;
        # used as the callback of the word rule in the 'script' state.
        # The elif chain is ordered: more specific word classes first.
        word = match.group()

        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word  # set-word
        elif re.match(
                r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
                r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
                r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
                r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
                r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
                r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
                r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
                r'while|compress|decompress|secure|open|close|read|read-io|'
                r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
                r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
                r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
                r'browse|launch|stats|get-modes|set-modes|to-local-file|'
                r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
                r'hide|draw|show|size-text|textinfo|offset-to-caret|'
                r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
                r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
                r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
                r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
                r'rsa-encrypt)$', word):
            yield match.start(), Name.Builtin, word  # native functions
        elif re.match(
                r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
                r'minimum|maximum|negate|complement|absolute|random|head|tail|'
                r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
                r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
                r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
                r'copy)$', word):
            yield match.start(), Name.Function, word  # action-style words
        elif re.match(
                r'(error|source|input|license|help|install|echo|Usage|with|func|'
                r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
                r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
                r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
                r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
                r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
                r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
                r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
                r'write-user|save-user|set-user-name|protect-system|parse-xml|'
                r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
                r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
                r'request-dir|center-face|do-events|net-error|decode-url|'
                r'parse-header|parse-header-date|parse-email-addrs|import-email|'
                r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
                r'find-key-face|do-face|viewtop|confine|find-window|'
                r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
                r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
                r'read-thru|load-thru|do-thru|launch-thru|load-image|'
                r'request-download|do-face-alt|set-font|set-para|get-style|'
                r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
                r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
                r'resize-face|load-stock|load-stock-block|notify|request|flash|'
                r'request-color|request-pass|request-text|request-list|'
                r'request-date|request-file|dbug|editor|link-relative-path|'
                r'emailer|parse-error)$', word):
            yield match.start(), Keyword.Namespace, word  # mezzanine words
        elif re.match(
                r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
                r'return|exit|break)$', word):
            yield match.start(), Name.Exception, word  # flow-control words
        elif re.match('REBOL$', word):
            yield match.start(), Generic.Heading, word
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word  # conversion words
        elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
                      word):
            yield match.start(), Operator, word
        elif re.match(r".*\?$", word):
            yield match.start(), Keyword, word  # predicate words
        elif re.match(r".*\!$", word):
            yield match.start(), Keyword.Type, word  # datatype words
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word  # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word  # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word  # file
        else:
            yield match.start(), Name.Variable, word

    tokens = {
        # Everything before the "REBOL [" header is skimmed as comment
        # text; the actual code is lexed in the 'script' state.
        'root': [
            (r'[^R]+', Comment),
            (r'REBOL\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
             r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),    # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
            (r'\d+X\d+', Keyword.Constant),                      # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),      # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{")\s/[\]]*', Name.Attribute),
            # whole words are classified by word_callback above
            (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        # braced strings nest, hence the #push/#pop pair
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        # binary literal 2#{...}: bits in groups of eight
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }

    def analyse_text(text):
        """
        Check whether the code contains a REBOL header, in which case it
        is probably not R code (both languages use the ``.r`` extension).
        """
        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code starts with REBOL header
            return 1.0
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code contains REBOL header but also some text before it
            return 0.5
+
+
class RedLexer(RegexLexer):
    """
    A `Red-language <http://www.red-lang.org/>`_ lexer.

    .. versionadded:: 2.0
    """
    name = 'Red'
    aliases = ['red', 'red/system']
    filenames = ['*.red', '*.reds']
    mimetypes = ['text/x-red', 'text/x-red-system']

    flags = re.IGNORECASE | re.MULTILINE

    # caret escape sequences of the form ^(1F) used inside strings,
    # chars and tags
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        # Classify a whole Red word and yield a single token for it;
        # used as the callback of the word rule in the 'script' state.
        # The elif chain is ordered: more specific word classes first.
        word = match.group()

        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word  # set-word
        elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
                      r'foreach|forall|func|function|does|has|switch|'
                      r'case|reduce|compose|get|set|print|prin|equal\?|'
                      r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
                      r'greater-or-equal\?|same\?|not|type\?|stats|'
                      r'bind|union|replace|charset|routine)$', word):
            yield match.start(), Name.Builtin, word
        elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
                      r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
                      r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
                      r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
                      r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
                      r'update|write)$', word):
            yield match.start(), Name.Function, word
        elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
                      r'none|crlf|dot|null-byte)$', word):
            yield match.start(), Name.Builtin.Pseudo, word
        elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
                      r'#switch|#default|#get-definition)$', word):
            yield match.start(), Keyword.Namespace, word  # preprocessor directives
        elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
                      r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
                      r'quote|forever)$', word):
            yield match.start(), Name.Exception, word  # flow-control words
        elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
                      r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
                      r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
                      r'any-struct\?|none\?|word\?|any-series\?)$', word):
            yield match.start(), Keyword, word  # type predicates
        elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
            yield match.start(), Keyword.Namespace, word  # calling conventions
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word  # conversion words
        elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
                      r'<<<|>>>|<<|>>|<|>%)$', word):
            yield match.start(), Operator, word
        elif re.match(r".*\!$", word):
            yield match.start(), Keyword.Type, word  # datatype words
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word  # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word  # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word  # file
        elif re.match(":.*", word):
            yield match.start(), Generic.Subheading, word  # get-word
        else:
            yield match.start(), Name.Variable, word

    tokens = {
        # Everything before the "Red [" or "Red/System [" header is
        # skimmed as comment text; the code is lexed in 'script'.
        'root': [
            (r'[^R]+', Comment),
            (r'Red/System\s+\[', Generic.Strong, 'script'),
            (r'Red\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f\s]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            # Red/System hex literal with trailing "h", e.g. FFh
            (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
             bygroups(Number.Hex, Name.Variable, Whitespace)),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
             r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),   # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
            (r'\d+X\d+', Keyword.Constant),                      # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),      # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{^")\s/[\]]*', Name.Attribute),
            # whole words are classified by word_callback above
            (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        # braced strings nest, hence the #push/#pop pair
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        # binary literal 2#{...}: bits in groups of eight
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }
diff --git a/pygments/lexers/resource.py b/pygments/lexers/resource.py
new file mode 100644
index 0000000..2e6b2dd
--- /dev/null
+++ b/pygments/lexers/resource.py
@@ -0,0 +1,84 @@
+"""
+ pygments.lexers.resource
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for resource definition files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Comment, String, Number, Operator, Text, \
+ Keyword, Name
+
+__all__ = ['ResourceLexer']
+
+
class ResourceLexer(RegexLexer):
    """Lexer for `ICU Resource bundles
    <http://userguide.icu-project.org/locale/resources>`_.

    .. versionadded:: 2.0
    """
    name = 'ResourceBundle'
    aliases = ['resourcebundle', 'resource']
    filenames = []

    # Type annotations that may follow a resource key, e.g. "key:int".
    _types = (':table', ':array', ':string', ':bin', ':import', ':intvector',
              ':int', ':alias')

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'//.*?$', Comment),
            (r'"', String, 'string'),
            (r'-?\d+', Number.Integer),
            (r'[,{}]', Operator),
            # resource key, optionally followed by one of the type tags
            (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types),
             bygroups(Name, Text, Keyword)),
            (r'\s+', Text),
            (words(_types), Keyword),
        ],
        'string': [
            # escape sequences and plain characters inside a quoted value
            (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|'
             r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String),
            # an unescaped '{' starts a nested MessageFormat argument
            (r'\{', String.Escape, 'msgname'),
            (r'"', String, '#pop')
        ],
        'msgname': [
            # argument name, then switch to parsing the argument body
            (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message'))
        ],
        'message': [
            (r'\{', String.Escape, 'msgname'),
            (r'\}', String.Escape, '#pop'),
            # "{name, type}" style argument
            (r'(,)(\s*)([a-z]+)(\s*\})',
             bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'),
            # plural/choice-style argument with an explicit offset
            (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)',
             bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
                      String.Escape, Operator.Word, String.Escape, Operator,
                      String.Escape, Number.Integer, String.Escape), 'choice'),
            # plural/choice-style argument without an offset
            (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)',
             bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
                      String.Escape), 'choice'),
            (r'\s+', String.Escape)
        ],
        'choice': [
            # numeric selector, e.g. "=1{...}" or "<=2{...}"
            (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)',
             bygroups(Operator, Number.Integer, String.Escape), 'message'),
            # keyword selector, e.g. "other{...}"
            (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'),
            # closing brace leaves both 'choice' and the enclosing 'message'
            (r'\}', String.Escape, ('#pop', '#pop')),
            (r'\s+', String.Escape)
        ],
        'str': [
            (r'\}', String.Escape, '#pop'),
            (r'\{', String.Escape, 'msgname'),
            (r'[^{}]+', String)
        ]
    }

    def analyse_text(text):
        # A bundle conventionally opens with its locale name and ":table";
        # "root" is the fallback locale, so this is a very strong signal.
        if text.startswith('root:table'):
            return 1.0
diff --git a/pygments/lexers/ride.py b/pygments/lexers/ride.py
new file mode 100644
index 0000000..e68c396
--- /dev/null
+++ b/pygments/lexers/ride.py
@@ -0,0 +1,139 @@
+"""
+ pygments.lexers.ride
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Ride programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include
+from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
+ String, Text
+
+__all__ = ['RideLexer']
+
+
class RideLexer(RegexLexer):
    """
    For `Ride <https://docs.wavesplatform.com/en/ride/about-ride.html>`_
    source code.

    .. versionadded:: 2.6
    """

    name = 'Ride'
    aliases = ['ride']
    filenames = ['*.ride']
    mimetypes = ['text/x-ride']

    # Identifiers may end with primes ('), as in ML-family languages.
    validName = r'[a-zA-Z_][a-zA-Z0-9_\']*'

    builtinOps = (
        '||', '|', '>=', '>', '==', '!',
        '=', '<=', '<', '::', ':+', ':', '!=', '/',
        '.', '=>', '-', '+', '*', '&&', '%', '++',
    )

    globalVariablesName = (
        'NOALG', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512',
        'SHA3224', 'SHA3256', 'SHA3384', 'SHA3512', 'nil', 'this', 'unit',
        'height', 'lastBlock', 'Buy', 'Sell', 'CEILING', 'FLOOR', 'DOWN',
        'HALFDOWN', 'HALFEVEN', 'HALFUP', 'UP',
    )

    typesName = (
        'Unit', 'Int', 'Boolean', 'ByteVector', 'String', 'Address', 'Alias',
        'Transfer', 'AssetPair', 'DataEntry', 'Order', 'Transaction',
        'GenesisTransaction', 'PaymentTransaction', 'ReissueTransaction',
        'BurnTransaction', 'MassTransferTransaction', 'ExchangeTransaction',
        'TransferTransaction', 'SetAssetScriptTransaction',
        'InvokeScriptTransaction', 'IssueTransaction', 'LeaseTransaction',
        'LeaseCancelTransaction', 'CreateAliasTransaction',
        'SetScriptTransaction', 'SponsorFeeTransaction', 'DataTransaction',
        'WriteSet', 'AttachedPayment', 'ScriptTransfer', 'TransferSet',
        'ScriptResult', 'Invocation', 'Asset', 'BlockInfo', 'Issue', 'Reissue',
        'Burn', 'NoAlg', 'Md5', 'Sha1', 'Sha224', 'Sha256', 'Sha384', 'Sha512',
        'Sha3224', 'Sha3256', 'Sha3384', 'Sha3512', 'BinaryEntry',
        'BooleanEntry', 'IntegerEntry', 'StringEntry', 'List', 'Ceiling',
        'Down', 'Floor', 'HalfDown', 'HalfEven', 'HalfUp', 'Up',
    )

    functionsName = (
        'fraction', 'size', 'toBytes', 'take', 'drop', 'takeRight', 'dropRight',
        'toString', 'isDefined', 'extract', 'throw', 'getElement', 'value',
        'cons', 'toUtf8String', 'toInt', 'indexOf', 'lastIndexOf', 'split',
        'parseInt', 'parseIntValue', 'keccak256', 'blake2b256', 'sha256',
        'sigVerify', 'toBase58String', 'fromBase58String', 'toBase64String',
        'fromBase64String', 'transactionById', 'transactionHeightById',
        'getInteger', 'getBoolean', 'getBinary', 'getString',
        'addressFromPublicKey', 'addressFromString', 'addressFromRecipient',
        'assetBalance', 'wavesBalance', 'getIntegerValue', 'getBooleanValue',
        'getBinaryValue', 'getStringValue', 'addressFromStringValue',
        'assetInfo', 'rsaVerify', 'checkMerkleProof', 'median',
        'valueOrElse', 'valueOrErrorMessage', 'contains', 'log', 'pow',
        'toBase16String', 'fromBase16String', 'blockInfoByHeight',
        'transferTransactionById',
    )

    reservedWords = words((
        'match', 'case', 'else', 'func', 'if',
        'let', 'then', '@Callable', '@Verifier',
    ), suffix=r'\b')

    tokens = {
        'root': [
            # Comments run to end of line.
            (r'#.*', Comment.Single),
            # Whitespace
            (r'\s+', Text),
            # Strings: plain double-quoted, utf8'...' and baseXX'...' blobs
            # each get their own state since escape handling differs.
            (r'"', String, 'doublequote'),
            (r'utf8\'', String, 'utf8quote'),
            (r'base(58|64|16)\'', String, 'singlequote'),
            # Keywords
            (reservedWords, Keyword.Reserved),
            (r'\{-#.*?#-\}', Keyword.Reserved),  # script directives
            (r'FOLD<\d+>', Keyword.Reserved),
            # Types
            (words(typesName), Keyword.Type),
            # Main
            # (specialName, Keyword.Reserved),
            # Prefix Operators, e.g. an operator wrapped in parentheses.
            (words(builtinOps, prefix=r'\(', suffix=r'\)'), Name.Function),
            # Infix Operators
            (words(builtinOps), Name.Function),
            (words(globalVariablesName), Name.Function),
            (words(functionsName), Name.Function),
            # Numbers
            include('numbers'),
            # Variable Names (must come after keyword/type/function words).
            (validName, Name.Variable),
            # Parens
            (r'[,()\[\]{}]', Punctuation),
        ],

        'doublequote': [
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\[nrfvb\\"]', String.Escape),
            (r'[^"]', String),
            (r'"', String, '#pop'),
        ],

        'utf8quote': [
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\[nrfvb\\\']', String.Escape),
            (r'[^\']', String),
            (r'\'', String, '#pop'),
        ],

        # baseXX blobs have no escapes; consume until the closing quote.
        'singlequote': [
            (r'[^\']', String),
            (r'\'', String, '#pop'),
        ],

        'numbers': [
            # NOTE(review): the optional leading underscore presumably
            # supports digit-group separators — confirm against the Ride
            # grammar before changing.
            (r'_?\d+', Number.Integer),
        ],
    }
diff --git a/pygments/lexers/rita.py b/pygments/lexers/rita.py
new file mode 100644
index 0000000..ff742e9
--- /dev/null
+++ b/pygments/lexers/rita.py
@@ -0,0 +1,43 @@
+"""
+ pygments.lexers.rita
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for RITA language
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Comment, Operator, Keyword, Name, Literal, \
+ Punctuation, Whitespace
+
+__all__ = ['RitaLexer']
+
+
class RitaLexer(RegexLexer):
    """
    Lexer for RITA.

    .. versionadded:: 2.11
    """
    name = 'Rita'
    url = 'https://github.com/zaibacu/rita-dsl'
    filenames = ['*.rita']
    aliases = ['rita']
    mimetypes = ['text/rita']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # Line comments start with '#'.
            (r'#(.*?)\n', Comment.Single),
            (r'@(.*?)\n', Operator),  # Yes, whole line as an operator
            # Double- and single-quoted literals; each allows the other
            # quote style's escapes plus common punctuation.
            (r'"(\w|\d|\s|(\\")|[\'_\-./,\?\!])+?"', Literal),
            (r'\'(\w|\d|\s|(\\\')|["_\-./,\?\!])+?\'', Literal),
            # ALL-CAPS identifiers are keywords, lowercase ones are names.
            (r'([A-Z_]+)', Keyword),
            (r'([a-z0-9_]+)', Name),
            (r'((->)|[!?+*|=])', Operator),
            (r'[\(\),\{\}]', Punctuation)
        ]
    }
diff --git a/pygments/lexers/rnc.py b/pygments/lexers/rnc.py
new file mode 100644
index 0000000..fc1c9a8
--- /dev/null
+++ b/pygments/lexers/rnc.py
@@ -0,0 +1,67 @@
+"""
+ pygments.lexers.rnc
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Relax-NG Compact syntax
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Punctuation
+
+__all__ = ['RNCCompactLexer']
+
+
class RNCCompactLexer(RegexLexer):
    """
    For RelaxNG-compact syntax.

    .. versionadded:: 2.2
    """

    name = 'Relax-NG Compact'
    url = 'http://relaxng.org'
    aliases = ['rng-compact', 'rnc']
    filenames = ['*.rnc']

    tokens = {
        'root': [
            (r'namespace\b', Keyword.Namespace),
            (r'(?:default|datatypes)\b', Keyword.Declaration),
            # '##' documentation comments must be tried before plain '#'.
            (r'##.*$', Comment.Preproc),
            (r'#.*$', Comment.Single),
            (r'"[^"]*"', String.Double),
            # TODO single quoted strings and escape sequences outside of
            # double-quoted strings
            (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'),
            (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'),
            (r'[,?&*=|~]|>>', Operator),
            (r'[(){}]', Punctuation),
            (r'.', Text),
        ],

        # a variable has been declared using `element` or `attribute`
        'variable': [
            (r'[^{]+', Name.Variable),
            (r'\{', Punctuation, '#pop'),
        ],

        # after an xsd:<datatype> declaration there may be attributes
        'maybe_xsdattributes': [
            (r'\{', Punctuation, 'xsdattributes'),
            (r'\}', Punctuation, '#pop'),
            (r'.', Text),
        ],

        # attributes take the form { key1 = value1 key2 = value2 ... }
        'xsdattributes': [
            # Matches attribute names one character at a time.
            (r'[^ =}]', Name.Attribute),
            (r'=', Operator),
            (r'"[^"]*"', String.Double),
            (r'\}', Punctuation, '#pop'),
            (r'.', Text),
        ],
    }
diff --git a/pygments/lexers/roboconf.py b/pygments/lexers/roboconf.py
new file mode 100644
index 0000000..2d8e5ff
--- /dev/null
+++ b/pygments/lexers/roboconf.py
@@ -0,0 +1,81 @@
+"""
+ pygments.lexers.roboconf
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Roboconf DSL.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, re
+from pygments.token import Text, Operator, Keyword, Name, Comment
+
+__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']
+
+
class RoboconfGraphLexer(RegexLexer):
    """
    Lexer for Roboconf graph files.

    .. versionadded:: 2.1
    """
    name = 'Roboconf Graph'
    aliases = ['roboconf-graph']
    filenames = ['*.graph']

    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            # Skip white spaces
            (r'\s+', Text),

            # There is one operator
            (r'=', Operator),

            # Keywords
            (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
            (words((
                'installer', 'extends', 'exports', 'imports', 'facets',
                'children'), suffix=r'\s*:?', prefix=r'\b'), Name),

            # Comments
            (r'#.*\n', Comment),

            # Default: single non-'#' characters, then a whole-line fallback.
            (r'[^#]', Text),
            (r'.*\n', Text)
        ]
    }
+
+
class RoboconfInstancesLexer(RegexLexer):
    """
    Lexer for Roboconf instances files.

    .. versionadded:: 2.1
    """
    name = 'Roboconf Instances'
    aliases = ['roboconf-instances']
    filenames = ['*.instances']

    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [

            # Skip white spaces
            (r'\s+', Text),

            # Keywords
            (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
            # Fix: suffix was r's*:?', which matched a literal run of 's'
            # characters; r'\s*:?' (optional whitespace, then an optional
            # colon) was clearly intended, mirroring RoboconfGraphLexer.
            (words(('name', 'count'), suffix=r'\s*:?', prefix=r'\b'), Name),
            (r'\s*[\w.-]+\s*:', Name),

            # Comments
            (r'#.*\n', Comment),

            # Default: single non-'#' characters, then a whole-line fallback.
            (r'[^#]', Text),
            (r'.*\n', Text)
        ]
    }
diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py
new file mode 100644
index 0000000..91794d0
--- /dev/null
+++ b/pygments/lexers/robotframework.py
@@ -0,0 +1,552 @@
+"""
+ pygments.lexers.robotframework
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Robot Framework.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Copyright 2012 Nokia Siemens Networks Oyj
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from pygments.lexer import Lexer
+from pygments.token import Token
+
+__all__ = ['RobotFrameworkLexer']
+
+
+HEADING = Token.Generic.Heading
+SETTING = Token.Keyword.Namespace
+IMPORT = Token.Name.Namespace
+TC_KW_NAME = Token.Generic.Subheading
+KEYWORD = Token.Name.Function
+ARGUMENT = Token.String
+VARIABLE = Token.Name.Variable
+COMMENT = Token.Comment
+SEPARATOR = Token.Punctuation
+SYNTAX = Token.Punctuation
+GHERKIN = Token.Generic.Emph
+ERROR = Token.Error
+
+
def normalize(string, remove=''):
    """Lower-case *string* and drop all spaces plus every char in *remove*.

    Used to compare table headers and setting names case-, space- and
    (optionally) punctuation-insensitively.
    """
    result = string.lower()
    for unwanted in remove + ' ':
        # str.replace is a no-op when the char is absent.
        result = result.replace(unwanted, '')
    return result
+
+
class RobotFrameworkLexer(Lexer):
    """
    For Robot Framework test data.

    Supports both space and pipe separated plain text formats.

    .. versionadded:: 1.6
    """
    name = 'RobotFramework'
    url = 'http://robotframework.org'
    aliases = ['robotframework']
    filenames = ['*.robot', '*.resource']
    mimetypes = ['text/x-robotframework']

    def __init__(self, **options):
        # Robot Framework separates cells with two or more spaces, so tabs
        # must expand to at least two spaces to stay valid separators.
        options['tabsize'] = 2
        options['encoding'] = 'UTF-8'
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Yield ``(index, token, value)`` triples for *text*.

        Each row is first split into cells/separators by RowTokenizer;
        every resulting piece is then re-scanned for ``${variable}``
        markup by VariableTokenizer.
        """
        row_tokenizer = RowTokenizer()
        var_tokenizer = VariableTokenizer()
        index = 0
        for row in text.splitlines():
            for value, token in row_tokenizer.tokenize(row):
                for value, token in var_tokenizer.tokenize(value, token):
                    if value:
                        yield index, token, str(value)
                    # Advance the character offset even for skipped empties.
                    index += len(value)
+
+
class VariableTokenizer:
    """Re-tokenizes a cell's text, highlighting ``${...}`` variables."""

    def tokenize(self, string, token):
        # Find the first scalar/list/environment/dict variable marker.
        var = VariableSplitter(string, identifiers='$@%&')
        # No variable, or a context where variables are not highlighted:
        # pass the text through unchanged.
        if var.start < 0 or token in (COMMENT, ERROR):
            yield string, token
            return
        for value, token in self._tokenize(var, string, token):
            if value:
                yield value, token

    def _tokenize(self, var, string, orig_token):
        # Text before the variable keeps the original token.
        before = string[:var.start]
        yield before, orig_token
        yield var.identifier + '{', SYNTAX
        # The base may itself contain variables; recurse.
        yield from self.tokenize(var.base, VARIABLE)
        yield '}', SYNTAX
        if var.index is not None:
            # Trailing [...] item access on list/dict variables.
            yield '[', SYNTAX
            yield from self.tokenize(var.index, VARIABLE)
            yield ']', SYNTAX
        # Continue with whatever follows the variable.
        yield from self.tokenize(string[var.end:], orig_token)
+
+
class RowTokenizer:
    """Splits rows into cells and dispatches them to the current table."""

    def __init__(self):
        self._table = UnknownTable()
        self._splitter = RowSplitter()
        testcases = TestCaseTable()
        # The settings table can set a default template that changes how
        # test case rows are tokenized later on.
        settings = SettingTable(testcases.set_default_template)
        variables = VariableTable()
        keywords = KeywordTable()
        # Map every accepted header spelling (normalized) to its table.
        self._tables = {'settings': settings, 'setting': settings,
                        'metadata': settings,
                        'variables': variables, 'variable': variables,
                        'testcases': testcases, 'testcase': testcases,
                        'tasks': testcases, 'task': testcases,
                        'keywords': keywords, 'keyword': keywords,
                        'userkeywords': keywords, 'userkeyword': keywords}

    def tokenize(self, row):
        commented = False
        heading = False
        for index, value in enumerate(self._splitter.split(row)):
            # First value, and every second after that, is a separator.
            index, separator = divmod(index-1, 2)
            if value.startswith('#'):
                # A '#' cell comments out the rest of the row.
                commented = True
            elif index == 0 and value.startswith('*'):
                # '*** Section ***' header switches the active table.
                self._table = self._start_table(value)
                heading = True
            yield from self._tokenize(value, index, commented,
                                      separator, heading)
        self._table.end_row()

    def _start_table(self, header):
        name = normalize(header, remove='*')
        # Unrecognized headers fall back to a table that only comments.
        return self._tables.get(name, UnknownTable())

    def _tokenize(self, value, index, commented, separator, heading):
        if commented:
            yield value, COMMENT
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            yield from self._table.tokenize(value, index)
+
+
class RowSplitter:
    """Splits a data row into alternating separators and cells.

    Handles both the space-separated (two or more spaces) and the
    pipe-separated plain-text formats; every row ends with a final
    ``'\\n'`` pseudo-cell.
    """
    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')

    def split(self, row):
        if row.startswith('| '):
            yield from self._split_from_pipes(row)
        else:
            yield from self._split_from_spaces(row)
        yield '\n'

    def _split_from_spaces(self, row):
        # Emit a pseudo-separator first so both formats start with one.
        yield ''
        yield from self._space_splitter.split(row)

    def _split_from_pipes(self, row):
        _, leading_sep, remainder = self._pipe_splitter.split(row, 1)
        yield leading_sep
        while self._pipe_splitter.search(remainder):
            cell, sep, remainder = self._pipe_splitter.split(remainder, 1)
            yield cell
            yield sep
        yield remainder
+
+
class Tokenizer:
    """Base class for per-cell tokenizers.

    Subclasses set ``_tokens`` to a tuple of token types indexed by cell
    position; the last entry is reused for all further cells.
    """
    _tokens = None

    def __init__(self):
        self._index = 0

    def tokenize(self, value):
        values_and_tokens = self._tokenize(value, self._index)
        self._index += 1
        # _tokenize may return either a bare token type or a list of
        # (value, token) pairs; token types are instances of type(Token),
        # so this normalizes the bare case into a one-pair list.
        if isinstance(values_and_tokens, type(Token)):
            values_and_tokens = [(value, values_and_tokens)]
        return values_and_tokens

    def _tokenize(self, value, index):
        # Clamp to the last configured token for trailing cells.
        index = min(index, len(self._tokens) - 1)
        return self._tokens[index]

    def _is_assign(self, value):
        # True when the whole cell is a variable, optionally followed by
        # '=' (e.g. '${var} =' in a keyword call's assignment position).
        if value.endswith('='):
            value = value[:-1].strip()
        var = VariableSplitter(value, identifiers='$@&')
        return var.start == 0 and var.end == len(value)
+
+
class Comment(Tokenizer):
    # Every cell in a commented region is a single COMMENT token.
    _tokens = (COMMENT,)
+
+
class Setting(Tokenizer):
    """Tokenizer for rows of the settings table.

    The first cell is the setting name; depending on which category the
    name falls in, the remaining cells are delegated to a specialized
    tokenizer (keyword call, import, or plain arguments).
    """
    _tokens = (SETTING, ARGUMENT)
    # Settings whose value is a keyword call.
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
                         'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition',
                         'testteardown', 'taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate')
    # Settings that import libraries/resources/variable files.
    _import_settings = ('library', 'resource', 'variables')
    # Settings whose values are plain arguments.
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout', 'tasktimeout')
    _custom_tokenizer = None

    def __init__(self, template_setter=None):
        Tokenizer.__init__(self)
        # Callback used by 'Test Template' to update the test case table.
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 1 and self._template_setter:
            self._template_setter(value)
        if index == 0:
            normalized = normalize(value)
            if normalized in self._keyword_settings:
                self._custom_tokenizer = KeywordCall(support_assign=False)
            elif normalized in self._import_settings:
                self._custom_tokenizer = ImportSetting()
            elif normalized not in self._other_settings:
                # Unknown setting name.
                return ERROR
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Tokenizer._tokenize(self, value, index)
+
+
class ImportSetting(Tokenizer):
    # Imported name first, then its arguments.
    _tokens = (IMPORT, ARGUMENT)
+
+
class TestCaseSetting(Setting):
    """Tokenizer for bracketed settings inside test cases ([Setup] etc.)."""
    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
                         'template')
    _import_settings = ()
    _other_settings = ('documentation', 'tags', 'timeout')

    def _tokenize(self, value, index):
        if index == 0:
            # Classify the setting name without its surrounding brackets.
            # (Renamed local from 'type', which shadowed the builtin.)
            token = Setting._tokenize(self, value[1:-1], index)
            return [('[', SYNTAX), (value[1:-1], token), (']', SYNTAX)]
        return Setting._tokenize(self, value, index)
+
+
class KeywordSetting(TestCaseSetting):
    # User keywords support a narrower set of bracketed settings.
    _keyword_settings = ('teardown',)
    _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
+
+
class Variable(Tokenizer):
    """Tokenizer for variable table rows: name cell, then value cells."""
    _tokens = (SYNTAX, ARGUMENT)

    def _tokenize(self, value, index):
        # The first cell must be a valid variable (optionally with '=').
        if index == 0 and not self._is_assign(value):
            return ERROR
        return Tokenizer._tokenize(self, value, index)
+
+
class KeywordCall(Tokenizer):
    """Tokenizer for keyword call rows: [assigns...] keyword args..."""
    _tokens = (KEYWORD, ARGUMENT)

    def __init__(self, support_assign=True):
        Tokenizer.__init__(self)
        # When assignment isn't supported, treat the first cell as the
        # keyword immediately.
        self._keyword_found = not support_assign
        self._assigns = 0

    def _tokenize(self, value, index):
        if not self._keyword_found and self._is_assign(value):
            self._assigns += 1
            return SYNTAX  # VariableTokenizer tokenizes this later.
        if self._keyword_found:
            # Shift the index so arguments start right after the keyword,
            # regardless of how many assignment cells preceded it.
            return Tokenizer._tokenize(self, value, index - self._assigns)
        self._keyword_found = True
        return GherkinTokenizer().tokenize(value, KEYWORD)
+
+
class GherkinTokenizer:
    """Splits a leading Gherkin word (Given/When/Then/And/But) off a
    keyword name so it can be emphasized separately."""
    _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE)

    def tokenize(self, value, token):
        prefix = self._gherkin_prefix.match(value)
        if prefix is None:
            return [(value, token)]
        split_at = prefix.end()
        return [(value[:split_at], GHERKIN), (value[split_at:], token)]
+
+
class TemplatedKeywordCall(Tokenizer):
    # With a template active, every cell is just an argument to it.
    _tokens = (ARGUMENT,)
+
+
class ForLoop(Tokenizer):
    """Tokenizer for legacy ``:FOR`` loop rows.

    Cells up to and including the 'IN' / 'IN RANGE' marker are syntax;
    everything after it is loop arguments.
    """

    def __init__(self):
        Tokenizer.__init__(self)
        self._in_arguments = False

    def _tokenize(self, value, index):
        if self._in_arguments:
            token = ARGUMENT
        else:
            token = SYNTAX
        # Flip state *after* choosing the token, so the marker cell
        # itself is still tokenized as syntax.
        if value.upper() in ('IN', 'IN RANGE'):
            self._in_arguments = True
        return token
+
+
class _Table:
    """Base class for table states; tokenizes one row at a time.

    Subclasses set ``_tokenizer_class`` for their default cell tokenizer.
    """
    _tokenizer_class = None

    def __init__(self, prev_tokenizer=None):
        self._tokenizer = self._tokenizer_class()
        # Kept so a '...' continuation row can resume the previous row's
        # tokenizer.
        self._prev_tokenizer = prev_tokenizer
        self._prev_values_on_row = []

    def tokenize(self, value, index):
        if self._continues(value, index):
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            yield from self._tokenize(value, index)
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        # A row continues when its first real cell is '...' and everything
        # before it is empty (or an escaped empty '\').
        return value == '...' and all(self._is_empty(t)
                                      for t in self._prev_values_on_row)

    def _is_empty(self, value):
        return value in ('', '\\')

    def _tokenize(self, value, index):
        return self._tokenizer.tokenize(value)

    def end_row(self):
        # Reset per-row state while remembering the tokenizer that was in
        # effect, for possible '...' continuation on the next row.
        self.__init__(prev_tokenizer=self._tokenizer)
+
+
class UnknownTable(_Table):
    # Content outside any recognized table is treated as comments.
    _tokenizer_class = Comment

    def _continues(self, value, index):
        # Unknown tables have no row continuation.
        return False
+
+
class VariableTable(_Table):
    # Rows in the variables table: name cell, then value cells.
    _tokenizer_class = Variable
+
+
class SettingTable(_Table):
    """Settings table; forwards 'Test Template' to the test case table."""
    _tokenizer_class = Setting

    def __init__(self, template_setter, prev_tokenizer=None):
        _Table.__init__(self, prev_tokenizer)
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        # 'Test Template' rows need a Setting that reports the template
        # back to the test case table.
        if index == 0 and normalize(value) == 'testtemplate':
            self._tokenizer = Setting(self._template_setter)
        return _Table._tokenize(self, value, index)

    def end_row(self):
        # Re-init must carry the template setter through row resets.
        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
+
+
class TestCaseTable(_Table):
    """Test case table; tracks per-test and default templates.

    Template state: ``None`` = unset, ``False`` = explicitly disabled for
    the current test, truthy = template active.
    """
    _setting_class = TestCaseSetting
    _test_template = None
    _default_template = None

    @property
    def _tokenizer_class(self):
        # Use the templated tokenizer when a per-test template is active,
        # or the default template applies and hasn't been disabled.
        if self._test_template or (self._default_template and
                                   self._test_template is not False):
            return TemplatedKeywordCall
        return KeywordCall

    def _continues(self, value, index):
        # The first column (test name) can never be a '...' continuation.
        return index > 0 and _Table._continues(self, value, index)

    def _tokenize(self, value, index):
        if index == 0:
            if value:
                # A new test starts; reset its per-test template.
                self._test_template = None
            return GherkinTokenizer().tokenize(value, TC_KW_NAME)
        if index == 1 and self._is_setting(value):
            if self._is_template(value):
                self._test_template = False
                self._tokenizer = self._setting_class(self.set_test_template)
            else:
                self._tokenizer = self._setting_class()
        if index == 1 and self._is_for_loop(value):
            self._tokenizer = ForLoop()
        if index == 1 and self._is_empty(value):
            return [(value, SYNTAX)]
        return _Table._tokenize(self, value, index)

    def _is_setting(self, value):
        return value.startswith('[') and value.endswith(']')

    def _is_template(self, value):
        return normalize(value) == '[template]'

    def _is_for_loop(self, value):
        # Matches ':FOR', ': FOR', '::FOR', etc. (legacy loop syntax).
        return value.startswith(':') and normalize(value, remove=':') == 'for'

    def set_test_template(self, template):
        self._test_template = self._is_template_set(template)

    def set_default_template(self, template):
        self._default_template = self._is_template_set(template)

    def _is_template_set(self, template):
        # These values all mean "no template".
        return normalize(template) not in ('', '\\', 'none', '${empty}')
+
+
class KeywordTable(TestCaseTable):
    # User keyword tables never use templates.
    _tokenizer_class = KeywordCall
    _setting_class = KeywordSetting

    def _is_template(self, value):
        return False
+
+
+# Following code copied directly from Robot Framework 2.7.5.
+
class VariableSplitter:
    """Finds the first complete ``${...}`` style variable in a string.

    After construction, ``start``/``end`` give the variable's span in the
    input (both -1 when none was found), ``identifier`` the marker
    character ($, @, % or &), ``base`` the text between the braces and
    ``index`` the content of a trailing ``[...]`` accessor, if any.
    Implemented as a hand-rolled character state machine (copied from
    Robot Framework 2.7.5).
    """

    def __init__(self, string, identifiers):
        self.identifier = None
        self.base = None
        self.index = None
        self.start = -1
        self.end = -1
        self._identifiers = identifiers
        self._may_have_internal_variables = False
        try:
            self._split(string)
        except ValueError:
            # No complete variable found; leave start/end at -1.
            pass
        else:
            self._finalize()

    def get_replaced_base(self, variables):
        # Only resolve nested variables when scanning actually saw one.
        if self._may_have_internal_variables:
            return variables.replace_string(self.base)
        return self.base

    def _finalize(self):
        # _variable_chars holds e.g. ['$', '{', 'f', 'o', 'o', '}'].
        self.identifier = self._variable_chars[0]
        self.base = ''.join(self._variable_chars[2:-1])
        self.end = self.start + len(self._variable_chars)
        if self._has_list_or_dict_variable_index():
            self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
            self.end += len(self._list_and_dict_variable_index_chars)

    def _has_list_or_dict_variable_index(self):
        return self._list_and_dict_variable_index_chars\
            and self._list_and_dict_variable_index_chars[-1] == ']'

    def _split(self, string):
        start_index, max_index = self._find_variable(string)
        self.start = start_index
        self._open_curly = 1
        self._state = self._variable_state
        self._variable_chars = [string[start_index], '{']
        self._list_and_dict_variable_index_chars = []
        self._string = string
        start_index += 2
        for index, char in enumerate(string[start_index:]):
            index += start_index  # Giving start to enumerate only in Py 2.6+
            try:
                # NOTE: state methods raise StopIteration to signal
                # completion. This is ordinary control flow (no generators
                # involved), so PEP 479 does not affect it.
                self._state(char, index)
            except StopIteration:
                return
            if index == max_index and not self._scanning_list_variable_index():
                return

    def _scanning_list_variable_index(self):
        return self._state in [self._waiting_list_variable_index_state,
                               self._list_variable_index_state]

    def _find_variable(self, string):
        # Work backwards from the last '}' to find a matching start marker.
        max_end_index = string.rfind('}')
        if max_end_index == -1:
            raise ValueError('No variable end found')
        if self._is_escaped(string, max_end_index):
            return self._find_variable(string[:max_end_index])
        start_index = self._find_start_index(string, 1, max_end_index)
        if start_index == -1:
            raise ValueError('No variable start found')
        return start_index, max_end_index

    def _find_start_index(self, string, start, end):
        # A start marker is an identifier char immediately before '{'.
        index = string.find('{', start, end) - 1
        if index < 0:
            return -1
        if self._start_index_is_ok(string, index):
            return index
        return self._find_start_index(string, index+2, end)

    def _start_index_is_ok(self, string, index):
        return string[index] in self._identifiers\
            and not self._is_escaped(string, index)

    def _is_escaped(self, string, index):
        # Odd number of preceding backslashes means escaped.
        escaped = False
        while index > 0 and string[index-1] == '\\':
            index -= 1
            escaped = not escaped
        return escaped

    def _variable_state(self, char, index):
        self._variable_chars.append(char)
        if char == '}' and not self._is_escaped(self._string, index):
            self._open_curly -= 1
            if self._open_curly == 0:
                if not self._is_list_or_dict_variable():
                    raise StopIteration
                # @/& variables may be followed by an item index.
                self._state = self._waiting_list_variable_index_state
        elif char in self._identifiers:
            self._state = self._internal_variable_start_state

    def _is_list_or_dict_variable(self):
        return self._variable_chars[0] in ('@', '&')

    def _internal_variable_start_state(self, char, index):
        self._state = self._variable_state
        if char == '{':
            # A nested variable like ${outer${inner}} starts here.
            self._variable_chars.append(char)
            self._open_curly += 1
            self._may_have_internal_variables = True
        else:
            self._variable_state(char, index)

    def _waiting_list_variable_index_state(self, char, index):
        if char != '[':
            raise StopIteration
        self._list_and_dict_variable_index_chars.append(char)
        self._state = self._list_variable_index_state

    def _list_variable_index_state(self, char, index):
        self._list_and_dict_variable_index_chars.append(char)
        if char == ']':
            raise StopIteration
diff --git a/pygments/lexers/ruby.py b/pygments/lexers/ruby.py
new file mode 100644
index 0000000..b71b535
--- /dev/null
+++ b/pygments/lexers/ruby.py
@@ -0,0 +1,523 @@
+"""
+ pygments.lexers.ruby
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Ruby and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \
+ bygroups, default, LexerContext, do_insertions, words, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error, Generic, Whitespace
+from pygments.util import shebang_matches
+
+__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer']
+
+
# Ruby's overridable operator methods; used below both for operator
# symbol literals (e.g. :<<) and for operator method calls (e.g. a.<<).
RUBY_OPERATORS = (
    '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~',
    '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '==='
)
+
+
class RubyLexer(ExtendedRegexLexer):
    """
    For Ruby source code.

    Uses :class:`ExtendedRegexLexer` because heredocs require
    context-carrying callbacks (see ``heredoc_callback``).
    """

    name = 'Ruby'
    url = 'http://www.ruby-lang.org'
    aliases = ['ruby', 'rb', 'duby']
    filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
                 '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile']
    mimetypes = ['text/x-ruby', 'application/x-ruby']

    flags = re.DOTALL | re.MULTILINE

    def heredoc_callback(self, match, ctx):
        # okay, this is the hardest part of parsing Ruby...
        # match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
        #
        # Strategy: tokenize the introducer now, remember the heredoc name
        # on a stack stored in the context, lex the rest of the line (which
        # may itself introduce further heredocs), and only when back at the
        # outermost heredoc consume the bodies line by line.

        start = match.start(1)
        yield start, Operator, match.group(1)        # <<[-~]?
        yield match.start(2), String.Heredoc, match.group(2)    # quote ", ', `
        yield match.start(3), String.Delimiter, match.group(3)  # heredoc name
        yield match.start(4), String.Heredoc, match.group(4)    # quote again

        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
        outermost = not bool(heredocstack)
        # first element records whether the terminator may be indented
        # (<<- and <<~ forms)
        heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3)))

        ctx.pos = match.start(5)
        ctx.end = match.end(5)
        # this may find other heredocs, so limit the recursion depth
        if len(heredocstack) < 100:
            yield from self.get_tokens_unprocessed(context=ctx)
        else:
            yield ctx.pos, String.Heredoc, match.group(5)
        ctx.pos = match.end()

        if outermost:
            # this is the outer heredoc again, now we can process them all
            for tolerant, hdname in heredocstack:
                lines = []
                for match in line_re.finditer(ctx.text, ctx.pos):
                    if tolerant:
                        check = match.group().strip()
                    else:
                        check = match.group().rstrip()
                    if check == hdname:
                        for amatch in lines:
                            yield amatch.start(), String.Heredoc, amatch.group()
                        yield match.start(), String.Delimiter, match.group()
                        ctx.pos = match.end()
                        break
                    else:
                        lines.append(match)
                else:
                    # end of heredoc not found -- error!
                    for amatch in lines:
                        yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
            del heredocstack[:]

    # Builds the 'strings' state plus all the delimiter-specific helper
    # states for %-literals; invoked once at class-body level below and
    # merged into ``tokens`` via ``tokens.update(...)``.
    def gen_rubystrings_rules():
        def intp_regex_callback(self, match, ctx):
            yield match.start(1), String.Regex, match.group(1)  # begin
            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Regex, match.group(4)  # end[mixounse]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Other, match.group(4)  # end
            ctx.pos = match.end()

        states = {}
        states['strings'] = [
            # easy ones
            (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
            (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
            (r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
            (r':"', String.Symbol, 'simple-sym'),
            (r'([a-zA-Z_]\w*)(:)(?!:)',
             bygroups(String.Symbol, Punctuation)),  # Since Ruby 1.9
            (r'"', String.Double, 'simple-string-double'),
            (r"'", String.Single, 'simple-string-single'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # quoted string and symbol
        for name, ttype, end in ('string-double', String.Double, '"'), \
                                ('string-single', String.Single, "'"),\
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-intp-escaped'),
                (r'[^\\%s#]+' % end, ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # braced quoted strings: one state family per delimiter pair,
        # so nesting of the same delimiter can be tracked with #push/#pop
        for lbrace, rbrace, bracecc, name in \
                ('\\{', '\\}', '{}', 'cb'), \
                ('\\[', '\\]', '\\[\\]', 'sb'), \
                ('\\(', '\\)', '()', 'pa'), \
                ('<', '>', '<>', 'ab'):
            states[name+'-intp-string'] = [
                (r'\\[\\' + bracecc + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + bracecc + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            states['strings'].append((r'%[qsw]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + bracecc + ']', String.Regex),
                (lbrace, String.Regex, '#push'),
                (rbrace + '[mixounse]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + bracecc + ']', String.Regex),
                (r'[^\\#' + bracecc + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        # these must come after %<brace>!
        states['strings'] += [
            # %r regex
            (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
             intp_regex_callback),
            # regular fancy strings with qsw
            (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other),
            (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Whitespace, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Whitespace, String.Other, None)),
            # all regular fancy strings without qsw
            (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
        ]

        return states

    tokens = {
        'root': [
            (r'\A#!.+?$', Comment.Hashbang),
            (r'#.*?$', Comment.Single),
            (r'=begin\s.*?\n=end.*?$', Comment.Multiline),
            # keywords
            (words((
                'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?',
                'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo',
                'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef',
                'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'),
             Keyword),
            # start of function, class and module names
            (r'(module)(\s+)([a-zA-Z_]\w*'
             r'(?:::[a-zA-Z_]\w*)*)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
            # NOTE(review): `+-/` inside this class is a character range
            # (covers , - . /) -- presumably only `+`, `-`, `/` were
            # intended; harmless in practice, but worth confirming.
            (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            # special methods
            (words((
                'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader',
                'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private',
                'module_function', 'public', 'protected', 'true', 'false', 'nil'),
                suffix=r'\b'),
             Keyword.Pseudo),
            (r'(not|and|or)\b', Operator.Word),
            (words((
                'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include',
                'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil',
                'private_method_defined', 'protected_method_defined',
                'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'),
             Name.Builtin),
            (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
            (words((
                'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort',
                'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller',
                'catch', 'chomp', 'chop', 'class_eval', 'class_variables',
                'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set',
                'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork',
                'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub',
                'hash', 'id', 'included_modules', 'inspect', 'instance_eval',
                'instance_method', 'instance_methods',
                'instance_variable_get', 'instance_variable_set', 'instance_variables',
                'lambda', 'load', 'local_variables', 'loop',
                'method', 'method_missing', 'methods', 'module_eval', 'name',
                'object_id', 'open', 'p', 'print', 'printf', 'private_class_method',
                'private_instance_methods',
                'private_methods', 'proc', 'protected_instance_methods',
                'protected_methods', 'public_class_method',
                'public_instance_methods', 'public_methods',
                'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require',
                'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep',
                'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint',
                'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint',
                'untrace_var', 'warn'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
            # normal heredocs
            (r'(?<!\w)(<<[-~]?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
             heredoc_callback),
            # empty string heredocs
            (r'(<<[-~]?)("|\')()(\2)(.*?\n)', heredoc_callback),
            (r'__END__', Comment.Preproc, 'end-part'),
            # multiline regex (after keywords or assignments)
            (r'(?:^|(?<=[=<>~!:])|'
             r'(?<=(?:\s|;)when\s)|'
             r'(?<=(?:\s|;)or\s)|'
             r'(?<=(?:\s|;)and\s)|'
             r'(?<=\.index\s)|'
             r'(?<=\.scan\s)|'
             r'(?<=\.sub\s)|'
             r'(?<=\.sub!\s)|'
             r'(?<=\.gsub\s)|'
             r'(?<=\.gsub!\s)|'
             r'(?<=\.match\s)|'
             r'(?<=(?:\s|;)if\s)|'
             r'(?<=(?:\s|;)elsif\s)|'
             r'(?<=^when\s)|'
             r'(?<=^index\s)|'
             r'(?<=^scan\s)|'
             r'(?<=^sub\s)|'
             r'(?<=^gsub\s)|'
             r'(?<=^sub!\s)|'
             r'(?<=^gsub!\s)|'
             r'(?<=^match\s)|'
             r'(?<=^if\s)|'
             r'(?<=^elsif\s)'
             r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
            # multiline regex (in method calls or subscripts)
            (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
            # multiline regex (this time the funny no whitespace rule)
            (r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
             'multiline-regex'),
            # lex numbers and ignore following regular expressions which
            # are division operators in fact (grrrr. i hate that. any
            # better ideas?)
            # since pygments 0.7 we also eat a "?" operator after numbers
            # so that the char operator does not work. Chars are not allowed
            # there so that you can use the ternary operator.
            # stupid example:
            #   x>=0?n[x]:""
            (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
             bygroups(Number.Oct, Whitespace, Operator)),
            (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
             bygroups(Number.Hex, Whitespace, Operator)),
            (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
             bygroups(Number.Bin, Whitespace, Operator)),
            (r'([\d]+(?:_\d+)*)(\s*)([/?])?',
             bygroups(Number.Integer, Whitespace, Operator)),
            # Names
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'\$\w+', Name.Variable.Global),
            (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
            (r'\$-[0adFiIlpvw]', Name.Variable.Global),
            (r'::', Operator),
            include('strings'),
            # chars
            (r'\?(\\[MC]-)*'  # modifiers
             r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
             r'(?!\w)',
             String.Char),
            (r'[A-Z]\w+', Name.Constant),
            # this is needed because ruby attributes can look
            # like keywords (class) or like this: ` ?!?
            (words(RUBY_OPERATORS, prefix=r'(\.|::)'),
             bygroups(Operator, Name.Operator)),
            (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
             bygroups(Operator, Name)),
            (r'[a-zA-Z_]\w*[!?]?', Name),
            (r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&!^|~]=?', Operator),
            (r'[(){};,/?:\\]', Punctuation),
            (r'\s+', Whitespace)
        ],
        'funcname': [
            (r'\(', Punctuation, 'defexpr'),
            (r'(?:([a-zA-Z_]\w*)(\.))?'  # optional scope name, like "self."
             r'('
             r'[a-zA-Z\u0080-\uffff][a-zA-Z0-9_\u0080-\uffff]*[!?=]?'  # method name
             r'|!=|!~|=~|\*\*?|[-+!~]@?|[/%&|^]|<=>|<[<=]?|>[>=]?|===?'  # or operator override
             r'|\[\]=?'  # or element reference/assignment override
             r'|`'  # or the undocumented backtick override
             r')',
             bygroups(Name.Class, Operator, Name.Function), '#pop'),
            default('#pop')
        ],
        'classname': [
            (r'\(', Punctuation, 'defexpr'),
            (r'<<', Operator, '#pop'),
            (r'[A-Z_]\w*', Name.Class, '#pop'),
            default('#pop')
        ],
        'defexpr': [
            (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
            (r'\(', Operator, '#push'),
            include('root')
        ],
        'in-intp': [
            (r'\{', String.Interpol, '#push'),
            (r'\}', String.Interpol, '#pop'),
            include('root'),
        ],
        'string-intp': [
            (r'#\{', String.Interpol, 'in-intp'),
            (r'#@@?[a-zA-Z_]\w*', String.Interpol),
            (r'#\$[a-zA-Z_]\w*', String.Interpol)
        ],
        'string-intp-escaped': [
            include('string-intp'),
            (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
             String.Escape)
        ],
        'interpolated-regex': [
            include('string-intp'),
            (r'[\\#]', String.Regex),
            (r'[^\\#]+', String.Regex),
        ],
        'interpolated-string': [
            include('string-intp'),
            (r'[\\#]', String.Other),
            (r'[^\\#]+', String.Other),
        ],
        'multiline-regex': [
            include('string-intp'),
            (r'\\\\', String.Regex),
            (r'\\/', String.Regex),
            (r'[\\#]', String.Regex),
            (r'[^\\/#]+', String.Regex),
            (r'/[mixounse]*', String.Regex, '#pop'),
        ],
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ]
    }
    # merge the generated string states into the static table
    tokens.update(gen_rubystrings_rules())

    def analyse_text(text):
        return shebang_matches(text, r'ruby(1\.\d)?')
+
+
class RubyConsoleLexer(Lexer):
    """
    For Ruby interactive console (**irb**) output like:

    .. sourcecode:: rbcon

        irb(main):001:0> a = 1
        => 1
        irb(main):002:0> puts a
        1
        => nil
    """
    name = 'Ruby irb session'
    aliases = ['rbcon', 'irb']
    mimetypes = ['text/x-ruby-shellsession']

    # matches the default irb prompt as well as the short '>> ' / '?> ' forms
    _prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] '
                            r'|>> |\?> ')

    def get_tokens_unprocessed(self, text):
        # Collect consecutive prompt lines into one code buffer so the
        # RubyLexer sees multi-line statements whole; the prompt tokens
        # are re-inserted afterwards via do_insertions().
        rblexer = RubyLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                # non-prompt line: flush any buffered code, then emit
                # the line as plain interpreter output
                if curcode:
                    yield from do_insertions(
                        insertions, rblexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            # flush a trailing code buffer with no following output line
            yield from do_insertions(
                insertions, rblexer.get_tokens_unprocessed(curcode))
+
+
class FancyLexer(RegexLexer):
    """
    Pygments Lexer For Fancy.

    Fancy is a self-hosted, pure object-oriented, dynamic,
    class-based, concurrent general-purpose programming language
    running on Rubinius, the Ruby VM.

    .. versionadded:: 1.5
    """
    name = 'Fancy'
    url = 'https://github.com/bakkdoor/fancy'
    filenames = ['*.fy', '*.fancypack']
    aliases = ['fancy', 'fy']
    mimetypes = ['text/x-fancysrc']

    tokens = {
        # copied from PerlLexer:
        'balanced-regex': [
            (r'/(\\\\|\\[^\\]|[^/\\])*/[egimosx]*', String.Regex, '#pop'),
            (r'!(\\\\|\\[^\\]|[^!\\])*![egimosx]*', String.Regex, '#pop'),
            (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
            (r'\{(\\\\|\\[^\\]|[^}\\])*\}[egimosx]*', String.Regex, '#pop'),
            (r'<(\\\\|\\[^\\]|[^>\\])*>[egimosx]*', String.Regex, '#pop'),
            (r'\[(\\\\|\\[^\\]|[^\]\\])*\][egimosx]*', String.Regex, '#pop'),
            (r'\((\\\\|\\[^\\]|[^)\\])*\)[egimosx]*', String.Regex, '#pop'),
            (r'@(\\\\|\\[^\\]|[^@\\])*@[egimosx]*', String.Regex, '#pop'),
            (r'%(\\\\|\\[^\\]|[^%\\])*%[egimosx]*', String.Regex, '#pop'),
            (r'\$(\\\\|\\[^\\]|[^$\\])*\$[egimosx]*', String.Regex, '#pop'),
        ],
        'root': [
            (r'\s+', Whitespace),

            # balanced delimiters (copied from PerlLexer):
            (r's\{(\\\\|\\[^\\]|[^}\\])*\}\s*', String.Regex, 'balanced-regex'),
            (r's<(\\\\|\\[^\\]|[^>\\])*>\s*', String.Regex, 'balanced-regex'),
            (r's\[(\\\\|\\[^\\]|[^\]\\])*\]\s*', String.Regex, 'balanced-regex'),
            (r's\((\\\\|\\[^\\]|[^)\\])*\)\s*', String.Regex, 'balanced-regex'),
            (r'm?/(\\\\|\\[^\\]|[^///\n])*/[gcimosx]*', String.Regex),
            (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),

            # Comments
            (r'#(.*?)\n', Comment.Single),
            # Symbols
            (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
            # Multi-line DoubleQuotedString
            (r'"""(\\\\|\\[^\\]|[^\\])*?"""', String),
            # DoubleQuotedString
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # keywords
            (r'(def|class|try|catch|finally|retry|return|return_local|match|'
             r'case|->|=>)\b', Keyword),
            # constants
            (r'(self|super|nil|false|true)\b', Name.Constant),
            (r'[(){};,/?|:\\]', Punctuation),
            # names
            (words((
                'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
                'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
                'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
                'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
             Name.Builtin),
            # functions
            (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
            # operators, must be below functions
            (r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
            (r'[A-Z]\w*', Name.Constant),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            ('@@?', Operator),
            (r'[a-zA-Z_]\w*', Name),
            # numbers - / checks are necessary to avoid mismarking regexes,
            # see comment in RubyLexer
            (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
             bygroups(Number.Oct, Whitespace, Operator)),
            (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
             bygroups(Number.Hex, Whitespace, Operator)),
            (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
             bygroups(Number.Bin, Whitespace, Operator)),
            (r'([\d]+(?:_\d+)*)(\s*)([/?])?',
             bygroups(Number.Integer, Whitespace, Operator)),
            # NOTE(review): the Integer rule above already consumes the
            # integral part of a float, so these two rules look largely
            # shadowed -- confirm intended ordering before reordering.
            (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer)
        ]
    }
diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py
new file mode 100644
index 0000000..500c7a5
--- /dev/null
+++ b/pygments/lexers/rust.py
@@ -0,0 +1,223 @@
+"""
+ pygments.lexers.rust
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Rust language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, words, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['RustLexer']
+
+
class RustLexer(RegexLexer):
    """
    Lexer for the Rust programming language (version 1.47).

    .. versionadded:: 1.6
    """
    name = 'Rust'
    url = 'https://www.rust-lang.org/'
    filenames = ['*.rs', '*.rs.in']
    aliases = ['rust', 'rs']
    mimetypes = ['text/rust', 'text/x-rust']

    # primitive type names
    keyword_types = (words((
        'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128',
        'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool',
    ), suffix=r'\b'), Keyword.Type)

    builtin_funcs_types = (words((
        'Copy', 'Send', 'Sized', 'Sync', 'Unpin',
        'Drop', 'Fn', 'FnMut', 'FnOnce', 'drop',
        'Box', 'ToOwned', 'Clone',
        'PartialEq', 'PartialOrd', 'Eq', 'Ord',
        'AsRef', 'AsMut', 'Into', 'From', 'Default',
        'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator',
        'ExactSizeIterator',
        'Option', 'Some', 'None',
        'Result', 'Ok', 'Err',
        'String', 'ToString', 'Vec',
    ), suffix=r'\b'), Name.Builtin)

    builtin_macros = (words((
        'asm', 'assert', 'assert_eq', 'assert_ne', 'cfg', 'column',
        'compile_error', 'concat', 'concat_idents', 'dbg', 'debug_assert',
        'debug_assert_eq', 'debug_assert_ne', 'env', 'eprint', 'eprintln',
        'file', 'format', 'format_args', 'format_args_nl', 'global_asm',
        'include', 'include_bytes', 'include_str',
        'is_aarch64_feature_detected',
        'is_arm_feature_detected',
        'is_mips64_feature_detected',
        'is_mips_feature_detected',
        'is_powerpc64_feature_detected',
        'is_powerpc_feature_detected',
        'is_x86_feature_detected',
        'line', 'llvm_asm', 'log_syntax', 'macro_rules', 'matches',
        'module_path', 'option_env', 'panic', 'print', 'println', 'stringify',
        'thread_local', 'todo', 'trace_macros', 'unimplemented', 'unreachable',
        'vec', 'write', 'writeln',
    ), suffix=r'!'), Name.Function.Magic)

    tokens = {
        'root': [
            # rust allows a file to start with a shebang, but if the first line
            # starts with #![ then it's not a shebang but a crate attribute.
            (r'#![^[\r\n].*$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            # Whitespace and Comments
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'//!.*?\n', String.Doc),
            (r'///(\n|[^/].*?\n)', String.Doc),
            (r'//(.*?)\n', Comment.Single),
            (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'),
            (r'/\*!', String.Doc, 'doccomment'),
            (r'/\*', Comment.Multiline, 'comment'),

            # Macro parameters
            (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc),
            # Keywords
            (words(('as', 'async', 'await', 'box', 'const', 'crate', 'dyn',
                    'else', 'extern', 'for', 'if', 'impl', 'in', 'loop',
                    'match', 'move', 'mut', 'pub', 'ref', 'return', 'static',
                    'super', 'trait', 'unsafe', 'use', 'where', 'while'),
                   suffix=r'\b'), Keyword),
            (words(('abstract', 'become', 'do', 'final', 'macro', 'override',
                    'priv', 'typeof', 'try', 'unsized', 'virtual', 'yield'),
                   suffix=r'\b'), Keyword.Reserved),
            (r'(true|false)\b', Keyword.Constant),
            (r'self\b', Name.Builtin.Pseudo),
            (r'mod\b', Keyword, 'modname'),
            (r'let\b', Keyword.Declaration),
            (r'fn\b', Keyword, 'funcname'),
            (r'(struct|enum|type|union)\b', Keyword, 'typename'),
            (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)),
            keyword_types,
            (r'[sS]elf\b', Name.Builtin.Pseudo),
            # Prelude (taken from Rust's src/libstd/prelude.rs)
            builtin_funcs_types,
            builtin_macros,
            # Path separators, so types don't catch them.
            (r'::\b', Text),
            # Types in positions.
            (r'(?::|->)', Text, 'typename'),
            # Labels
            (r'(break|continue)(\b\s*)(\'[A-Za-z_]\w*)?',
             bygroups(Keyword, Text.Whitespace, Name.Label)),

            # Character literals
            (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
             String.Char),
            (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
             String.Char),

            # Binary literals
            (r'0b[01_]+', Number.Bin, 'number_lit'),
            # Octal literals
            (r'0o[0-7_]+', Number.Oct, 'number_lit'),
            # Hexadecimal literals
            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
            # Decimal literals
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float,
             'number_lit'),
            (r'[0-9][0-9_]*', Number.Integer, 'number_lit'),

            # String literals
            (r'b"', String, 'bytestring'),
            (r'"', String, 'string'),
            (r'(?s)b?r(#*)".*?"\1', String),

            # Lifetime names
            (r"'", Operator, 'lifetime'),

            # Operators and Punctuation
            (r'\.\.=?', Operator),
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?]', Operator),

            # Identifiers
            (r'[a-zA-Z_]\w*', Name),
            # Raw identifiers
            # NOTE(review): this rule appears unreachable as written -- the
            # `r` of `r#name` is consumed by the plain-identifier rule above;
            # r#name still tokenizes acceptably (Name, '#', Name). Confirm
            # before reordering.
            (r'r#[a-zA-Z_]\w*', Name),

            # Attributes
            (r'#!?\[', Comment.Preproc, 'attribute['),

            # Misc
            # Lone hashes: not used in Rust syntax, but allowed in macro
            # arguments, most famously for quote::quote!()
            (r'#', Text),
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'doccomment': [
            (r'[^*/]+', String.Doc),
            (r'/\*', String.Doc, '#push'),
            (r'\*/', String.Doc, '#pop'),
            (r'[*/]', String.Doc),
        ],
        'modname': [
            (r'\s+', Text),
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'funcname': [
            (r'\s+', Text),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            default('#pop'),
        ],
        'typename': [
            (r'\s+', Text),
            (r'&', Keyword.Pseudo),
            (r"'", Operator, 'lifetime'),
            builtin_funcs_types,
            keyword_types,
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'lifetime': [
            (r"(static|_)", Name.Builtin),
            (r"[a-zA-Z_]+\w*", Name.Attribute),
            default('#pop'),
        ],
        # optional type suffix directly after a numeric literal
        'number_lit': [
            (r'[ui](8|16|32|64|size)', Keyword, '#pop'),
            (r'f(32|64)', Keyword, '#pop'),
            default('#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
             r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String),
        ],
        'bytestring': [
            # byte strings additionally allow \x80..\xff escapes
            (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape),
            include('string'),
        ],
        'attribute_common': [
            (r'"', String, 'string'),
            (r'\[', Comment.Preproc, 'attribute['),
        ],
        'attribute[': [
            include('attribute_common'),
            (r'\]', Comment.Preproc, '#pop'),
            (r'[^"\]\[]+', Comment.Preproc),
        ],
    }
diff --git a/pygments/lexers/sas.py b/pygments/lexers/sas.py
new file mode 100644
index 0000000..f2f408c
--- /dev/null
+++ b/pygments/lexers/sas.py
@@ -0,0 +1,227 @@
+"""
+ pygments.lexers.sas
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for SAS.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Keyword, Name, Number, String, Text, \
+ Other, Generic
+
+__all__ = ['SASLexer']
+
+
class SASLexer(RegexLexer):
    """
    For SAS files.

    .. versionadded:: 2.2
    """
    # Syntax from syntax/sas.vim by James Kidd <james.kidd@covance.com>

    name = 'SAS'
    aliases = ['sas']
    filenames = ['*.SAS', '*.sas']
    mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
    flags = re.IGNORECASE | re.MULTILINE

    # macro-language keywords, matched with a leading '%'
    builtins_macros = (
        "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
        "display", "do", "else", "end", "eval", "global", "goto", "if",
        "index", "input", "keydef", "label", "left", "length", "let",
        "local", "lowcase", "macro", "mend", "nrquote",
        "nrstr", "put", "qleft", "qlowcase", "qscan",
        "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
        "str", "substr", "superq", "syscall", "sysevalf", "sysexec",
        "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
        "then", "to", "trim", "unquote", "until", "upcase", "verify",
        "while", "window"
    )

    builtins_conditionals = (
        "do", "if", "then", "else", "end", "until", "while"
    )

    builtins_statements = (
        "abort", "array", "attrib", "by", "call", "cards", "cards4",
        "catname", "continue", "datalines", "datalines4", "delete", "delim",
        "delimiter", "display", "dm", "drop", "endsas", "error", "file",
        "filename", "footnote", "format", "goto", "in", "infile", "informat",
        "input", "keep", "label", "leave", "length", "libname", "link",
        "list", "lostcard", "merge", "missing", "modify", "options", "output",
        "out", "page", "put", "redirect", "remove", "rename", "replace",
        "retain", "return", "select", "set", "skip", "startsas", "stop",
        "title", "update", "waitsas", "where", "window", "x", "systask"
    )

    builtins_sql = (
        "add", "and", "alter", "as", "cascade", "check", "create",
        "delete", "describe", "distinct", "drop", "foreign", "from",
        "group", "having", "index", "insert", "into", "in", "key", "like",
        "message", "modify", "msgtype", "not", "null", "on", "or",
        "order", "primary", "references", "reset", "restrict", "select",
        "set", "table", "unique", "update", "validate", "view", "where"
    )

    # function names, matched only when directly followed by '('
    builtins_functions = (
        "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
        "attrn", "band", "betainv", "blshift", "bnot", "bor",
        "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
        "close", "cnonct", "collate", "compbl", "compound",
        "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
        "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
        "datejul", "datepart", "datetime", "day", "dclose", "depdb",
        "depdbsl", "depsl", "depsyd",
        "deptab", "dequote", "dhms", "dif", "digamma",
        "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
        "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
        "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
        "fexist", "fget", "fileexist", "filename", "fileref",
        "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
        "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
        "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
        "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
        "hbound", "hms", "hosthelp", "hour", "ibessel", "index",
        "indexc", "indexw", "input", "inputc", "inputn", "int",
        "intck", "intnx", "intrr", "irr", "jbessel", "juldate",
        "kurtosis", "lag", "lbound", "left", "length", "lgamma",
        "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
        "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
        "mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
        "normal", "note", "npv", "open", "ordinal", "pathname",
        "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
        "probbeta", "probbnml", "probchi", "probf", "probgam",
        "probhypr", "probit", "probnegb", "probnorm", "probt",
        "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
        "ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
        "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
        "rewind", "right", "round", "saving", "scan", "sdf", "second",
        "sign", "sin", "sinh", "skewness", "soundex", "spedis",
        "sqrt", "std", "stderr", "stfips", "stname", "stnamel",
        "substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
        "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
        "tnonct", "today", "translate", "tranwrd", "trigamma",
        "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
        "varfmt", "varinfmt", "varlabel", "varlen", "varname",
        "varnum", "varray", "varrayx", "vartype", "verify", "vformat",
        "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
        "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
        "vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
        "vinformatw", "vinformatwx", "vinformatx", "vlabel",
        "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
        "vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
        "zipnamel", "zipstate"
    )

    tokens = {
        'root': [
            include('comments'),
            include('proc-data'),
            include('cards-datalines'),
            include('logs'),
            include('general'),
            (r'.', Text),
        ],
        # SAS is multi-line regardless, but * is ended by ;
        'comments': [
            (r'^\s*\*.*?;', Comment),
            (r'/\*.*?\*/', Comment),
            (r'^\s*\*(.|\n)*?;', Comment.Multiline),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
        ],
        # Special highlight for proc, data, quit, run
        'proc-data': [
            (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
             Keyword.Reserved),
        ],
        # Special highlight cards and datalines
        'cards-datalines': [
            (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
        ],
        # inline data block: everything up to a lone ';' line is raw data
        'data': [
            (r'(.|\n)*^\s*;\s*$', Other, '#pop'),
        ],
        # Special highlight for put NOTE|ERROR|WARNING (order matters)
        'logs': [
            (r'\n?^\s*%?put ', Keyword, 'log-messages'),
        ],
        'log-messages': [
            (r'NOTE(:|-).*', Generic, '#pop'),
            (r'WARNING(:|-).*', Generic.Emph, '#pop'),
            (r'ERROR(:|-).*', Generic.Error, '#pop'),
            include('general'),
        ],
        'general': [
            include('keywords'),
            include('vars-strings'),
            include('special'),
            include('numbers'),
        ],
        # Keywords, statements, functions, macros
        'keywords': [
            (words(builtins_statements,
                   prefix = r'\b',
                   suffix = r'\b'),
             Keyword),
            (words(builtins_sql,
                   prefix = r'\b',
                   suffix = r'\b'),
             Keyword),
            (words(builtins_conditionals,
                   prefix = r'\b',
                   suffix = r'\b'),
             Keyword),
            (words(builtins_macros,
                   prefix = r'%',
                   suffix = r'\b'),
             Name.Builtin),
            (words(builtins_functions,
                   prefix = r'\b',
                   suffix = r'\('),
             Name.Builtin),
        ],
        # Strings and user-defined variables and macros (order matters)
        'vars-strings': [
            (r'&[a-z_]\w{0,31}\.?', Name.Variable),
            (r'%[a-z_]\w{0,31}', Name.Function),
            (r'\'', String, 'string_squote'),
            (r'"', String, 'string_dquote'),
        ],
        'string_squote': [
            ('\'', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),
            # AFAIK, macro variables are not evaluated in single quotes
            # (r'&', Name.Variable, 'validvar'),
            (r'[^$\'\\]+', String),
            (r'[$\'\\]', String),
        ],
        'string_dquote': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),
            (r'&', Name.Variable, 'validvar'),
            (r'[^$&"\\]+', String),
            (r'[$"\\]', String),
        ],
        'validvar': [
            (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
        ],
        # SAS numbers and special variables
        'numbers': [
            (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
             Number),
        ],
        'special': [
            (r'(null|missing|_all_|_automatic_|_character_|_n_|'
             r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
             Keyword.Constant),
        ],
        # 'operators': [
        #     (r'(-|=|<=|>=|<|>|<>|&|!=|'
        #      r'\||\*|\+|\^|/|!|~|~=)', Operator)
        # ],
    }
diff --git a/pygments/lexers/savi.py b/pygments/lexers/savi.py
new file mode 100644
index 0000000..2397fab
--- /dev/null
+++ b/pygments/lexers/savi.py
@@ -0,0 +1,170 @@
+"""
+ pygments.lexers.savi
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Savi.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import Whitespace, Keyword, Name, String, Number, \
+ Operator, Punctuation, Comment, Generic, Error
+
+__all__ = ['SaviLexer']
+
+
+# The canonical version of this file can be found in the following repository,
+# where it is kept in sync with any language changes, as well as the other
+# pygments-like lexers that are maintained for use with other tools:
+# - https://github.com/savi-lang/savi/blob/main/tooling/pygments/lexers/savi.py
+#
+# If you're changing this file in the pygments repository, please ensure that
+# any changes you make are also propagated to the official Savi repository,
+# in order to avoid accidental clobbering of your changes later when an update
+# from the Savi repository flows forward into the pygments repository.
+#
+# If you're changing this file in the Savi repository, please ensure that
+# any changes you make are also reflected in the other pygments-like lexers
+# (rouge, vscode, etc) so that all of the lexers can be kept cleanly in sync.
+
class SaviLexer(RegexLexer):
    """
    For Savi source code.

    Bracketing tokens (``(``, ``{``, ``[``) push a fresh copy of the
    ``root`` state and the matching closers pop it, so nesting depth is
    tracked on the lexer's state stack.

    .. versionadded:: 2.10
    """

    name = 'Savi'
    url = 'https://github.com/savi-lang/savi'
    aliases = ['savi']
    filenames = ['*.savi']

    tokens = {
        "root": [
            # Line Comment
            (r'//.*?$', Comment.Single),

            # Doc Comment
            (r'::.*?$', Comment.Single),

            # Capability Operator (an apostrophe followed by a word,
            # only when not forming a character literal)
            (r'(\')(\w+)(?=[^\'])', bygroups(Operator, Name)),

            # Double-Quote String (optionally prefixed by one word char,
            # e.g. a byte-string prefix)
            (r'\w?"', String.Double, "string.double"),

            # Single-Char String
            (r"'", String.Char, "string.char"),

            # Type Name (capitalized, optionally with a leading underscore)
            (r'(_?[A-Z]\w*)', Name.Class),

            # Nested Type Name
            (r'(\.)(\s*)(_?[A-Z]\w*)', bygroups(Punctuation, Whitespace, Name.Class)),

            # Declare (a line-leading ':word' tag enters the 'decl' state)
            (r'^([ \t]*)(:\w+)',
             bygroups(Whitespace, Name.Tag),
             "decl"),

            # Error-Raising Calls/Names (trailing '!')
            (r'((\w+|\+|\-|\*)\!)', Generic.Deleted),

            # Numeric Values
            (r'\b\d([\d_]*(\.[\d_]+)?)\b', Number),

            # Hex Numeric Values
            (r'\b0x([0-9a-fA-F_]+)\b', Number.Hex),

            # Binary Numeric Values
            (r'\b0b([01_]+)\b', Number.Bin),

            # Function Call (with braces)
            (r'\w+(?=\()', Name.Function),

            # Function Call (with receiver)
            (r'(\.)(\s*)(\w+)', bygroups(Punctuation, Whitespace, Name.Function)),

            # Function Call (with self receiver)
            (r'(@)(\w+)', bygroups(Punctuation, Name.Function)),

            # Parenthesis (push/pop a nested copy of 'root')
            (r'\(', Punctuation, "root"),
            (r'\)', Punctuation, "#pop"),

            # Brace
            (r'\{', Punctuation, "root"),
            (r'\}', Punctuation, "#pop"),

            # Bracket (']!' is an error-raising form, like names above)
            (r'\[', Punctuation, "root"),
            (r'(\])(\!)', bygroups(Punctuation, Generic.Deleted), "#pop"),
            (r'\]', Punctuation, "#pop"),

            # Punctuation
            (r'[,;:\.@]', Punctuation),

            # Piping Operators
            (r'(\|\>)', Operator),

            # Branching Operators
            (r'(\&\&|\|\||\?\?|\&\?|\|\?|\.\?)', Operator),

            # Comparison Operators
            (r'(\<\=\>|\=\~|\=\=|\<\=|\>\=|\<|\>)', Operator),

            # Arithmetic Operators
            (r'(\+|\-|\/|\*|\%)', Operator),

            # Assignment Operators
            (r'(\=)', Operator),

            # Other Operators
            (r'(\!|\<\<|\<|\&|\|)', Operator),

            # Identifiers
            (r'\b\w+\b', Name),

            # Whitespace
            (r'[ \t\r]+\n*|\n+', Whitespace),
        ],

        # Declare (nested rules): lowercase words after the tag are
        # declaration keywords; a ':' or newline returns to 'root'.
        "decl": [
            (r'\b[a-z_]\w*\b(?!\!)', Keyword.Declaration),
            (r':', Punctuation, "#pop"),
            (r'\n', Whitespace, "#pop"),
            include("root"),
        ],

        # Double-Quote String (nested rules)
        "string.double": [
            # '\(' opens an interpolated expression
            (r'\\\(', String.Interpol, "string.interpolation"),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
            (r'\\[bfnrt\\\']', String.Escape),
            (r'\\"', String.Escape),
            (r'"', String.Double, "#pop"),
            (r'[^\\"]+', String.Double),
            # any other backslash use is an error
            (r'.', Error),
        ],

        # Single-Char String (nested rules)
        "string.char": [
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
            (r'\\[bfnrt\\\']', String.Escape),
            (r"\\'", String.Escape),
            (r"'", String.Char, "#pop"),
            (r"[^\\']+", String.Char),
            (r'.', Error),
        ],

        # Interpolation inside String (nested rules): lex the embedded
        # expression with the full 'root' ruleset until the closing ')'.
        "string.interpolation": [
            (r"\)", String.Interpol, "#pop"),
            include("root"),
        ]
    }
diff --git a/pygments/lexers/scdoc.py b/pygments/lexers/scdoc.py
new file mode 100644
index 0000000..f670d33
--- /dev/null
+++ b/pygments/lexers/scdoc.py
@@ -0,0 +1,79 @@
+"""
+ pygments.lexers.scdoc
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for scdoc, a simple man page generator.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this
+from pygments.token import Text, Comment, Keyword, String, Generic
+
+__all__ = ['ScdocLexer']
+
+
class ScdocLexer(RegexLexer):
    """
    `scdoc` is a simple man page generator for POSIX systems written in C99.

    The markup is markdown-like: line comments start with ';', headings
    with '#', and inline emphasis uses '_' and '*'.

    .. versionadded:: 2.5
    """
    name = 'scdoc'
    url = 'https://git.sr.ht/~sircmpwn/scdoc'
    aliases = ['scdoc', 'scd']
    filenames = ['*.scd', '*.scdoc']
    flags = re.MULTILINE

    tokens = {
        'root': [
            # comment
            (r'^(;.+\n)', bygroups(Comment)),

            # heading with pound prefix
            (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
            (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)),
            # bulleted lists (item text is re-lexed with the 'inline' rules)
            (r'^(\s*)([*-])(\s)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # numbered lists
            (r'^(\s*)(\.+\.)( .+\n)',
             bygroups(Text, Keyword, using(this, state='inline'))),
            # quote
            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
            # text block (verbatim between ``` fences)
            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),

            include('inline'),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # underlines
            (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)),
            # bold
            (r'(\s)(\*[^*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)),
            # inline code
            (r'`[^`]+`', String.Backtick),

            # general text, must come last!
            (r'[^\\\s]+', Text),
            (r'.', Text),
        ],
    }

    def analyse_text(text):
        """This is very similar to markdown, save for the escape characters
        needed for * and _."""
        result = 0

        # Escaped '*' or '_' suggests scdoc rather than plain markdown;
        # each hint nudges the score only slightly.
        if '\\*' in text:
            result += 0.01

        if '\\_' in text:
            result += 0.01

        return result
diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py
new file mode 100644
index 0000000..25cc406
--- /dev/null
+++ b/pygments/lexers/scripting.py
@@ -0,0 +1,1286 @@
+"""
+ pygments.lexers.scripting
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for scripting and embedded languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
+ words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error, Whitespace, Other
+from pygments.util import get_bool_opt, get_list_opt
+
+__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
+ 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer',
+ 'EasytrieveLexer', 'JclLexer', 'MiniScriptLexer']
+
+
class LuaLexer(RegexLexer):
    """
    For Lua source code.

    Additional options accepted:

    `func_name_highlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabled_modules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted.

        To get a list of allowed modules have a look into the
        `_lua_builtins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._lua_builtins import MODULES
            >>> MODULES.keys()
            ['string', 'coroutine', 'modules', 'io', 'basic', ...]
    """

    name = 'Lua'
    url = 'https://www.lua.org/'
    aliases = ['lua']
    filenames = ['*.lua', '*.wlua']
    mimetypes = ['text/x-lua', 'application/x-lua']

    # Reusable regex fragments.  Lua long-bracket comments may use any
    # number of '=' signs between the brackets; the opening level is
    # captured as the named group 'level' and matched again at the close.
    _comment_multiline = r'(?:--\[(?P<level>=*)\[[\w\W]*?\](?P=level)\])'
    _comment_single = r'(?:--.*$)'
    _space = r'(?:\s+)'
    _s = r'(?:%s|%s|%s)' % (_comment_multiline, _comment_single, _space)
    _name = r'(?:[^\W\d]\w*)'

    tokens = {
        'root': [
            # Lua allows a file to start with a shebang.
            (r'#!.*', Comment.Preproc),
            default('base'),
        ],
        # Comments and whitespace, shared by several states below.
        'ws': [
            (_comment_multiline, Comment.Multiline),
            (_comment_single, Comment.Single),
            (_space, Text),
        ],
        'base': [
            include('ws'),

            (r'(?i)0x[\da-f]*(\.[\da-f]*)?(p[+-]?\d+)?', Number.Hex),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'\d+', Number.Integer),

            # multiline strings
            (r'(?s)\[(=*)\[.*?\]\1\]', String),

            (r'::', Punctuation, 'label'),
            (r'\.{3}', Punctuation),
            (r'[=<>|~&+\-*/%#^]+|\.\.', Operator),
            (r'[\[\]{}().,:;]', Punctuation),
            (r'(and|or|not)\b', Operator.Word),

            ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
             r'while)\b', Keyword.Reserved),
            (r'goto\b', Keyword.Reserved, 'goto'),
            (r'(local)\b', Keyword.Declaration),
            (r'(true|false|nil)\b', Keyword.Constant),

            (r'(function)\b', Keyword.Reserved, 'funcname'),

            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),

            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],

        'funcname': [
            include('ws'),
            (r'[.:]', Punctuation),
            # A name followed (possibly after comments/space) by '.' or ':'
            # is a table/class prefix, not the function name itself.
            (r'%s(?=%s*[.:])' % (_name, _s), Name.Class),
            (_name, Name.Function, '#pop'),
            # inline function
            (r'\(', Punctuation, '#pop'),
        ],

        'goto': [
            include('ws'),
            (_name, Name.Label, '#pop'),
        ],

        'label': [
            include('ws'),
            (r'::', Punctuation, '#pop'),
            (_name, Name.Label),
        ],

        'stringescape': [
            (r'\\([abfnrtv\\"\']|[\r\n]{1,2}|z\s*|x[0-9a-fA-F]{2}|\d{1,3}|'
             r'u\{[0-9a-fA-F]+\})', String.Escape),
        ],

        'sqs': [
            (r"'", String.Single, '#pop'),
            (r"[^\\']+", String.Single),
        ],

        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'[^\\"]+', String.Double),
        ]
    }

    def __init__(self, **options):
        """Read the `func_name_highlighting` and `disabled_modules` options
        and precompute the set of builtin function names to highlight."""
        self.func_name_highlighting = get_bool_opt(
            options, 'func_name_highlighting', True)
        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])

        self._functions = set()
        if self.func_name_highlighting:
            from pygments.lexers._lua_builtins import MODULES
            for mod, func in MODULES.items():
                if mod not in self.disabled_modules:
                    self._functions.update(func)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process Name tokens: names in the builtin set are re-tagged
        Name.Builtin, and dotted names are split into name/'.'/name."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
                elif '.' in value:
                    # The Name rule above admits at most one dot, so a
                    # two-way split is always safe here.
                    a, b = value.split('.')
                    yield index, Name, a
                    yield index + len(a), Punctuation, '.'
                    yield index + len(a) + 1, Name, b
                    continue
            yield index, token, value
+
class MoonScriptLexer(LuaLexer):
    """
    For MoonScript source code.

    Inherits from LuaLexer and overrides the token table and the
    token post-processing (see `get_tokens_unprocessed`).

    .. versionadded:: 1.5
    """

    name = 'MoonScript'
    url = 'http://moonscript.org'
    aliases = ['moonscript', 'moon']
    filenames = ['*.moon']
    mimetypes = ['text/x-moonscript', 'application/x-moonscript']

    tokens = {
        'root': [
            # shebang line
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            ('--.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'(?i)0x[0-9a-f]*', Number.Hex),
            (r'\d+', Number.Integer),
            (r'\n', Whitespace),
            (r'[^\S\n]+', Text),
            # Lua-style long-bracket strings
            (r'(?s)\[(=*)\[.*?\]\1\]', String),
            (r'(->|=>)', Name.Function),
            (r':[a-zA-Z_]\w*', Name.Variable),
            (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
            (r'[;,]', Punctuation),
            (r'[\[\]{}()]', Keyword.Type),
            (r'[a-zA-Z_]\w*:', Name.Variable),
            (words((
                'class', 'extends', 'if', 'then', 'super', 'do', 'with',
                'import', 'export', 'while', 'elseif', 'return', 'for', 'in',
                'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch',
                'break'), suffix=r'\b'),
             Keyword),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(and|or|not)\b', Operator.Word),
            (r'(self)\b', Name.Builtin.Pseudo),
            # '@'/'@@' class/instance variable references
            (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class),
            (r'[A-Z]\w*', Name.Class),  # proper name
            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],
        'stringescape': [
            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
        ],
        'sqs': [
            ("'", String.Single, '#pop'),
            ("[^']+", String)
        ],
        'dqs': [
            ('"', String.Double, '#pop'),
            ('[^"]+', String)
        ]
    }

    def get_tokens_unprocessed(self, text):
        """Yield the inherited Lua token stream, re-tagging '.' tokens
        as Operator instead of Punctuation."""
        # set . as Operator instead of Punctuation
        for index, token, value in LuaLexer.get_tokens_unprocessed(self, text):
            if token == Punctuation and value == ".":
                token = Operator
            yield index, token, value
+
+
class ChaiscriptLexer(RegexLexer):
    """
    For ChaiScript source code.

    The state machine mirrors the JavaScript lexer it was adapted from:
    after an operator or opening punctuation, a '/' begins a regex
    literal, which is handled by the 'slashstartsregex' state.

    .. versionadded:: 2.0
    """

    name = 'ChaiScript'
    url = 'http://chaiscript.com/'
    aliases = ['chaiscript', 'chai']
    filenames = ['*.chai']
    mimetypes = ['text/x-chaiscript', 'application/x-chaiscript']

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'^\#.*?\n', Comment.Single)
        ],
        'slashstartsregex': [
            include('commentsandwhitespace'),
            # regex literal with optional flags; '\B' allows a bare '/.../'
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # a '/' that is not a valid regex: skip to end of line
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            include('commentsandwhitespace'),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # Multi-char and single-char operators.  Note the trailing '|'
            # after '\.\.': without it the two concatenated fragments fuse,
            # so '..' alone never matched and '<<', '==' etc. only matched
            # when preceded by '..' (bug fix).
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'[=+\-*/]', Operator),
            (r'(for|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch'
             r')\b', Keyword, 'slashstartsregex'),
            (r'(var)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(attr|def|fun)\b', Keyword.Reserved),
            (r'(true|false)\b', Keyword.Constant),
            (r'(eval|throw)\b', Name.Builtin),
            # backtick-quoted operator-function names, e.g. `+`
            (r'`\S+`', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"', String.Double, 'dqstring'),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ],
        # Double-quoted strings support ${...} interpolation.
        'dqstring': [
            (r'\$\{[^"}]+?\}', String.Interpol),
            (r'\$', String.Double),
            (r'\\\\', String.Double),
            (r'\\"', String.Double),
            (r'[^\\"$]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }
+
+
class LSLLexer(RegexLexer):
    """
    For Second Life's Linden Scripting Language source code.

    The language's keywords, types, events, builtin functions and
    constants are matched against the large precompiled alternation
    patterns defined as class attributes below; invalid, deprecated and
    unimplemented identifiers are tagged as Error.

    .. versionadded:: 2.0
    """

    name = 'LSL'
    aliases = ['lsl']
    filenames = ['*.lsl']
    mimetypes = ['text/x-lsl']

    flags = re.MULTILINE

    # Alternation tables, one per token category.  These are generated
    # from the LSL identifier lists; edit the source lists, not these.
    lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b'
    lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b'
    lsl_states = r'\b(?:(?:state)\s+\w+|default)\b'
    lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b'
    lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b'
    lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b'
    lsl_constants_integer = r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b'
    lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b'
    lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b'
    lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b'
    lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b'
    lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b'
    lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b'
    lsl_invalid_illegal = r'\b(?:event)\b'
    lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b'
    lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b'
    lsl_reserved_log = r'\b(?:print)\b'
    lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?'

    tokens = {
        'root':
        [
            # Comments and strings first, then the big identifier tables
            # (most specific categories before the generic Name.Variable).
            (r'//.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String.Double, 'string'),
            (lsl_keywords, Keyword),
            (lsl_types, Keyword.Type),
            (lsl_states, Name.Class),
            (lsl_events, Name.Builtin),
            (lsl_functions_builtin, Name.Function),
            (lsl_constants_float, Keyword.Constant),
            (lsl_constants_integer, Keyword.Constant),
            (lsl_constants_integer_boolean, Keyword.Constant),
            (lsl_constants_rotation, Keyword.Constant),
            (lsl_constants_string, Keyword.Constant),
            (lsl_constants_vector, Keyword.Constant),
            (lsl_invalid_broken, Error),
            (lsl_invalid_deprecated, Error),
            (lsl_invalid_illegal, Error),
            (lsl_invalid_unimplemented, Error),
            (lsl_reserved_godmode, Keyword.Reserved),
            (lsl_reserved_log, Keyword.Reserved),
            (r'\b([a-zA-Z_]\w*)\b', Name.Variable),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            (lsl_operators, Operator),
            # ':' / ':=' — flagged as Error tokens
            (r':=?', Error),
            (r'[,;{}()\[\]]', Punctuation),
            (r'\n+', Whitespace),
            (r'\s+', Whitespace)
        ],
        # Nestable C-style block comments ('#push' tracks the depth).
        'comment':
        [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        # Double-quoted strings; only \n, \t, \" and \\ are valid escapes,
        # every other backslash use is an Error.
        'string':
        [
            (r'\\([nt"\\])', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\\.', Error),
            (r'[^"\\]+', String.Double),
        ]
    }
+
+
+class AppleScriptLexer(RegexLexer):
+ """
+ For AppleScript source code,
+ including `AppleScript Studio
+ <http://developer.apple.com/documentation/AppleScript/
+ Reference/StudioReference>`_.
+ Contributed by Andreas Amann <aamann@mac.com>.
+
+ .. versionadded:: 1.0
+ """
+
+ name = 'AppleScript'
+ url = 'https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/introduction/ASLR_intro.html'
+ aliases = ['applescript']
+ filenames = ['*.applescript']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ Identifiers = r'[a-zA-Z]\w*'
+
+ # XXX: use words() for all of these
+ Literals = ('AppleScript', 'current application', 'false', 'linefeed',
+ 'missing value', 'pi', 'quote', 'result', 'return', 'space',
+ 'tab', 'text item delimiters', 'true', 'version')
+ Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ',
+ 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
+ 'real ', 'record ', 'reference ', 'RGB color ', 'script ',
+ 'text ', 'unit types', '(?:Unicode )?text', 'string')
+ BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month',
+ 'paragraph', 'word', 'year')
+ HandlerParams = ('about', 'above', 'against', 'apart from', 'around',
+ 'aside from', 'at', 'below', 'beneath', 'beside',
+ 'between', 'for', 'given', 'instead of', 'on', 'onto',
+ 'out of', 'over', 'since')
+ Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL',
+ 'choose application', 'choose color', 'choose file( name)?',
+ 'choose folder', 'choose from list',
+ 'choose remote application', 'clipboard info',
+ 'close( access)?', 'copy', 'count', 'current date', 'delay',
+ 'delete', 'display (alert|dialog)', 'do shell script',
+ 'duplicate', 'exists', 'get eof', 'get volume settings',
+ 'info for', 'launch', 'list (disks|folder)', 'load script',
+ 'log', 'make', 'mount volume', 'new', 'offset',
+ 'open( (for access|location))?', 'path to', 'print', 'quit',
+ 'random number', 'read', 'round', 'run( script)?',
+ 'say', 'scripting components',
+ 'set (eof|the clipboard to|volume)', 'store script',
+ 'summarize', 'system attribute', 'system info',
+ 'the clipboard', 'time to GMT', 'write', 'quoted form')
+ References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
+ 'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
+ 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
+ 'before', 'behind', 'every', 'front', 'index', 'last',
+ 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose')
+ Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not",
+ "isn't", "isn't equal( to)?", "is not equal( to)?",
+ "doesn't equal", "does not equal", "(is )?greater than",
+ "comes after", "is not less than or equal( to)?",
+ "isn't less than or equal( to)?", "(is )?less than",
+ "comes before", "is not greater than or equal( to)?",
+ "isn't greater than or equal( to)?",
+ "(is )?greater than or equal( to)?", "is not less than",
+ "isn't less than", "does not come before",
+ "doesn't come before", "(is )?less than or equal( to)?",
+ "is not greater than", "isn't greater than",
+ "does not come after", "doesn't come after", "starts? with",
+ "begins? with", "ends? with", "contains?", "does not contain",
+ "doesn't contain", "is in", "is contained by", "is not in",
+ "is not contained by", "isn't contained by", "div", "mod",
+ "not", "(a )?(ref( to)?|reference to)", "is", "does")
+ Control = ('considering', 'else', 'error', 'exit', 'from', 'if',
+ 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
+ 'try', 'until', 'using terms from', 'while', 'whith',
+ 'with timeout( of)?', 'with transaction', 'by', 'continue',
+ 'end', 'its?', 'me', 'my', 'return', 'of', 'as')
+ Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get')
+ Reserved = ('but', 'put', 'returning', 'the')
+ StudioClasses = ('action cell', 'alert reply', 'application', 'box',
+ 'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
+ 'clip view', 'color well', 'color-panel',
+ 'combo box( item)?', 'control',
+ 'data( (cell|column|item|row|source))?', 'default entry',
+ 'dialog reply', 'document', 'drag info', 'drawer',
+ 'event', 'font(-panel)?', 'formatter',
+ 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
+ 'movie( view)?', 'open-panel', 'outline view', 'panel',
+ 'pasteboard', 'plugin', 'popup button',
+ 'progress indicator', 'responder', 'save-panel',
+ 'scroll view', 'secure text field( cell)?', 'slider',
+ 'sound', 'split view', 'stepper', 'tab view( item)?',
+ 'table( (column|header cell|header view|view))',
+ 'text( (field( cell)?|view))?', 'toolbar( item)?',
+ 'user-defaults', 'view', 'window')
+ StudioEvents = ('accept outline drop', 'accept table drop', 'action',
+ 'activated', 'alert ended', 'awake from nib', 'became key',
+ 'became main', 'begin editing', 'bounds changed',
+ 'cell value', 'cell value changed', 'change cell value',
+ 'change item value', 'changed', 'child of item',
+ 'choose menu item', 'clicked', 'clicked toolbar item',
+ 'closed', 'column clicked', 'column moved',
+ 'column resized', 'conclude drop', 'data representation',
+ 'deminiaturized', 'dialog ended', 'document nib name',
+ 'double clicked', 'drag( (entered|exited|updated))?',
+ 'drop', 'end editing', 'exposed', 'idle', 'item expandable',
+ 'item value', 'item value changed', 'items changed',
+ 'keyboard down', 'keyboard up', 'launched',
+ 'load data representation', 'miniaturized', 'mouse down',
+ 'mouse dragged', 'mouse entered', 'mouse exited',
+ 'mouse moved', 'mouse up', 'moved',
+ 'number of browser rows', 'number of items',
+ 'number of rows', 'open untitled', 'opened', 'panel ended',
+ 'parameters updated', 'plugin loaded', 'prepare drop',
+ 'prepare outline drag', 'prepare outline drop',
+ 'prepare table drag', 'prepare table drop',
+ 'read from file', 'resigned active', 'resigned key',
+ 'resigned main', 'resized( sub views)?',
+ 'right mouse down', 'right mouse dragged',
+ 'right mouse up', 'rows changed', 'scroll wheel',
+ 'selected tab view item', 'selection changed',
+ 'selection changing', 'should begin editing',
+ 'should close', 'should collapse item',
+ 'should end editing', 'should expand item',
+ 'should open( untitled)?',
+ 'should quit( after last window closed)?',
+ 'should select column', 'should select item',
+ 'should select row', 'should select tab view item',
+ 'should selection change', 'should zoom', 'shown',
+ 'update menu item', 'update parameters',
+ 'update toolbar item', 'was hidden', 'was miniaturized',
+ 'will become active', 'will close', 'will dismiss',
+ 'will display browser cell', 'will display cell',
+ 'will display item cell', 'will display outline cell',
+ 'will finish launching', 'will hide', 'will miniaturize',
+ 'will move', 'will open', 'will pop up', 'will quit',
+ 'will resign active', 'will resize( sub views)?',
+ 'will select tab view item', 'will show', 'will zoom',
+ 'write to file', 'zoomed')
+ StudioCommands = ('animate', 'append', 'call method', 'center',
+ 'close drawer', 'close panel', 'display',
+ 'display alert', 'display dialog', 'display panel', 'go',
+ 'hide', 'highlight', 'increment', 'item for',
+ 'load image', 'load movie', 'load nib', 'load panel',
+ 'load sound', 'localized string', 'lock focus', 'log',
+ 'open drawer', 'path for', 'pause', 'perform action',
+ 'play', 'register', 'resume', 'scroll', 'select( all)?',
+ 'show', 'size to fit', 'start', 'step back',
+ 'step forward', 'stop', 'synchronize', 'unlock focus',
+ 'update')
+ StudioProperties = ('accepts arrow key', 'action method', 'active',
+ 'alignment', 'allowed identifiers',
+ 'allows branch selection', 'allows column reordering',
+ 'allows column resizing', 'allows column selection',
+ 'allows customization',
+ 'allows editing text attributes',
+ 'allows empty selection', 'allows mixed state',
+ 'allows multiple selection', 'allows reordering',
+ 'allows undo', 'alpha( value)?', 'alternate image',
+ 'alternate increment value', 'alternate title',
+ 'animation delay', 'associated file name',
+ 'associated object', 'auto completes', 'auto display',
+ 'auto enables items', 'auto repeat',
+ 'auto resizes( outline column)?',
+ 'auto save expanded items', 'auto save name',
+ 'auto save table columns', 'auto saves configuration',
+ 'auto scroll', 'auto sizes all columns to fit',
+ 'auto sizes cells', 'background color', 'bezel state',
+ 'bezel style', 'bezeled', 'border rect', 'border type',
+ 'bordered', 'bounds( rotation)?', 'box type',
+ 'button returned', 'button type',
+ 'can choose directories', 'can choose files',
+ 'can draw', 'can hide',
+ 'cell( (background color|size|type))?', 'characters',
+ 'class', 'click count', 'clicked( data)? column',
+ 'clicked data item', 'clicked( data)? row',
+ 'closeable', 'collating', 'color( (mode|panel))',
+ 'command key down', 'configuration',
+ 'content(s| (size|view( margins)?))?', 'context',
+ 'continuous', 'control key down', 'control size',
+ 'control tint', 'control view',
+ 'controller visible', 'coordinate system',
+ 'copies( on scroll)?', 'corner view', 'current cell',
+ 'current column', 'current( field)? editor',
+ 'current( menu)? item', 'current row',
+ 'current tab view item', 'data source',
+ 'default identifiers', 'delta (x|y|z)',
+ 'destination window', 'directory', 'display mode',
+ 'displayed cell', 'document( (edited|rect|view))?',
+ 'double value', 'dragged column', 'dragged distance',
+ 'dragged items', 'draws( cell)? background',
+ 'draws grid', 'dynamically scrolls', 'echos bullets',
+ 'edge', 'editable', 'edited( data)? column',
+ 'edited data item', 'edited( data)? row', 'enabled',
+ 'enclosing scroll view', 'ending page',
+ 'error handling', 'event number', 'event type',
+ 'excluded from windows menu', 'executable path',
+ 'expanded', 'fax number', 'field editor', 'file kind',
+ 'file name', 'file type', 'first responder',
+ 'first visible column', 'flipped', 'floating',
+ 'font( panel)?', 'formatter', 'frameworks path',
+ 'frontmost', 'gave up', 'grid color', 'has data items',
+ 'has horizontal ruler', 'has horizontal scroller',
+ 'has parent data item', 'has resize indicator',
+ 'has shadow', 'has sub menu', 'has vertical ruler',
+ 'has vertical scroller', 'header cell', 'header view',
+ 'hidden', 'hides when deactivated', 'highlights by',
+ 'horizontal line scroll', 'horizontal page scroll',
+ 'horizontal ruler view', 'horizontally resizable',
+ 'icon image', 'id', 'identifier',
+ 'ignores multiple clicks',
+ 'image( (alignment|dims when disabled|frame style|scaling))?',
+ 'imports graphics', 'increment value',
+ 'indentation per level', 'indeterminate', 'index',
+ 'integer value', 'intercell spacing', 'item height',
+ 'key( (code|equivalent( modifier)?|window))?',
+ 'knob thickness', 'label', 'last( visible)? column',
+ 'leading offset', 'leaf', 'level', 'line scroll',
+ 'loaded', 'localized sort', 'location', 'loop mode',
+ 'main( (bunde|menu|window))?', 'marker follows cell',
+ 'matrix mode', 'maximum( content)? size',
+ 'maximum visible columns',
+ 'menu( form representation)?', 'miniaturizable',
+ 'miniaturized', 'minimized image', 'minimized title',
+ 'minimum column width', 'minimum( content)? size',
+ 'modal', 'modified', 'mouse down state',
+ 'movie( (controller|file|rect))?', 'muted', 'name',
+ 'needs display', 'next state', 'next text',
+ 'number of tick marks', 'only tick mark values',
+ 'opaque', 'open panel', 'option key down',
+ 'outline table column', 'page scroll', 'pages across',
+ 'pages down', 'palette label', 'pane splitter',
+ 'parent data item', 'parent window', 'pasteboard',
+ 'path( (names|separator))?', 'playing',
+ 'plays every frame', 'plays selection only', 'position',
+ 'preferred edge', 'preferred type', 'pressure',
+ 'previous text', 'prompt', 'properties',
+ 'prototype cell', 'pulls down', 'rate',
+ 'released when closed', 'repeated',
+ 'requested print time', 'required file type',
+ 'resizable', 'resized column', 'resource path',
+ 'returns records', 'reuses columns', 'rich text',
+ 'roll over', 'row height', 'rulers visible',
+ 'save panel', 'scripts path', 'scrollable',
+ 'selectable( identifiers)?', 'selected cell',
+ 'selected( data)? columns?', 'selected data items?',
+ 'selected( data)? rows?', 'selected item identifier',
+ 'selection by rect', 'send action on arrow key',
+ 'sends action when done editing', 'separates columns',
+ 'separator item', 'sequence number', 'services menu',
+ 'shared frameworks path', 'shared support path',
+ 'sheet', 'shift key down', 'shows alpha',
+ 'shows state by', 'size( mode)?',
+ 'smart insert delete enabled', 'sort case sensitivity',
+ 'sort column', 'sort order', 'sort type',
+ 'sorted( data rows)?', 'sound', 'source( mask)?',
+ 'spell checking enabled', 'starting page', 'state',
+ 'string value', 'sub menu', 'super menu', 'super view',
+ 'tab key traverses cells', 'tab state', 'tab type',
+ 'tab view', 'table view', 'tag', 'target( printer)?',
+ 'text color', 'text container insert',
+ 'text container origin', 'text returned',
+ 'tick mark position', 'time stamp',
+ 'title(d| (cell|font|height|position|rect))?',
+ 'tool tip', 'toolbar', 'trailing offset', 'transparent',
+ 'treat packages as directories', 'truncated labels',
+ 'types', 'unmodified characters', 'update views',
+ 'use sort indicator', 'user defaults',
+ 'uses data source', 'uses ruler',
+ 'uses threaded animation',
+ 'uses title from previous column', 'value wraps',
+ 'version',
+ 'vertical( (line scroll|page scroll|ruler view))?',
+ 'vertically resizable', 'view',
+ 'visible( document rect)?', 'volume', 'width', 'window',
+ 'windows menu', 'wraps', 'zoomable', 'zoomed')
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r'¬\n', String.Escape),
+ (r"'s\s+", Text), # This is a possessive, consider moving
+ (r'(--|#).*?$', Comment),
+ (r'\(\*', Comment.Multiline, 'comment'),
+ (r'[(){}!,.:]', Punctuation),
+ (r'(«)([^»]+)(»)',
+ bygroups(Text, Name.Builtin, Text)),
+ (r'\b((?:considering|ignoring)\s*)'
+ r'(application responses|case|diacriticals|hyphens|'
+ r'numeric strings|punctuation|white space)',
+ bygroups(Keyword, Name.Builtin)),
+ (r'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
+ (r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
+ (r'^(\s*(?:on|end)\s+)'
+ r'(%s)' % '|'.join(StudioEvents[::-1]),
+ bygroups(Keyword, Name.Function)),
+ (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
+ (r'\b(as )(%s)\b' % '|'.join(Classes),
+ bygroups(Keyword, Name.Class)),
+ (r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
+ (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
+ (r'\b(%s)\b' % '|'.join(Control), Keyword),
+ (r'\b(%s)\b' % '|'.join(Declarations), Keyword),
+ (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
+ (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
+ (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
+ (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
+ (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
+ (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
+ (r'\b(%s)\b' % '|'.join(References), Name.Builtin),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r'\b(%s)\b' % Identifiers, Name.Variable),
+ (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
+ (r'[-+]?\d+', Number.Integer),
+ ],
+ 'comment': [
+ (r'\(\*', Comment.Multiline, '#push'),
+ (r'\*\)', Comment.Multiline, '#pop'),
+ ('[^*(]+', Comment.Multiline),
+ ('[*(]', Comment.Multiline),
+ ],
+ }
+
+
class RexxLexer(RegexLexer):
    """
    Rexx is a scripting language available for
    a wide range of different platforms with its roots found on mainframe
    systems. It is popular for I/O- and data based tasks and can act as glue
    language to bind different applications together.

    .. versionadded:: 2.0
    """
    name = 'Rexx'
    url = 'http://www.rexxinfo.org/'
    aliases = ['rexx', 'arexx']
    filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
    mimetypes = ['text/x-rexx']
    # Rexx keywords, labels and symbols are case insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # Nestable block comment: /* ... */
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String, 'string_double'),
            (r"'", String, 'string_single'),
            (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
            # Label immediately followed by PROCEDURE: a function definition.
            (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b',
             bygroups(Name.Function, Whitespace, Operator, Whitespace,
                      Keyword.Declaration)),
            # Plain label (jump target for SIGNAL/CALL).
            (r'([a-z_]\w*)(\s*)(:)',
             bygroups(Name.Label, Whitespace, Operator)),
            include('function'),
            include('keyword'),
            include('operator'),
            # Any remaining symbol; must come after the keyword/function
            # rules so it only catches ordinary identifiers.
            (r'[a-z_]\w*', Text),
        ],
        'function': [
            # Built-in functions are only highlighted when directly
            # followed by an opening parenthesis.
            (words((
                'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor',
                'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare',
                'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr',
                'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert',
                'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max',
                'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign',
                'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol',
                'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word',
                'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d',
                'xrange'), suffix=r'(\s*)(\()'),
             bygroups(Name.Builtin, Whitespace, Operator)),
        ],
        'keyword': [
            (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
             r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
             r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
             r'while)\b', Keyword.Reserved),
        ],
        'operator': [
            # Includes the EBCDIC "not" sign (¬) variants alongside the
            # backslash negation forms.
            (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
             r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
             r'¬>>|¬>|¬|\.|,)', Operator),
        ],
        'string_double': [
            (r'[^"\n]+', String),
            # A doubled quote is an escaped quote inside the string.
            (r'""', String),
            (r'"', String, '#pop'),
            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
        ],
        'string_single': [
            (r'[^\'\n]+', String),
            (r'\'\'', String),
            (r'\'', String, '#pop'),
            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
        ],
        'comment': [
            (r'[^*]+', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ]
    }

    # Shorthand used to build the MULTILINE-compiled heuristic patterns below.
    _c = lambda s: re.compile(s, re.MULTILINE)
    _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
    _ADDRESS_PATTERN = _c(r'^\s*address\s+')
    _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
    _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
    _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b')
    _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
    _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
    # Each pattern is paired with a weight indicating how strongly its
    # presence suggests Rexx; analyse_text() sums the weights of the
    # patterns that match.
    PATTERNS_AND_WEIGHTS = (
        (_ADDRESS_COMMAND_PATTERN, 0.2),
        (_ADDRESS_PATTERN, 0.05),
        (_DO_WHILE_PATTERN, 0.1),
        (_ELSE_DO_PATTERN, 0.1),
        (_IF_THEN_DO_PATTERN, 0.1),
        (_PROCEDURE_PATTERN, 0.5),
        (_PARSE_ARG_PATTERN, 0.2),
    )

    def analyse_text(text):
        """
        Check for initial comment and patterns that distinguish Rexx from other
        C-like languages.
        """
        if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
            # Header matches MVS Rexx requirements, this is certainly a Rexx
            # script.
            return 1.0
        elif text.startswith('/*'):
            # Header matches general Rexx requirements; the source code might
            # still be any language using C comments such as C++, C# or Java.
            lowerText = text.lower()
            result = sum(weight
                         for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
                         if pattern.search(lowerText)) + 0.01
            return min(result, 1.0)
        # Texts without a leading comment fall through and implicitly return
        # None -- presumably treated as "no match" by the caller; confirm
        # against the framework's analyse_text wrapper.
+
+
class MOOCodeLexer(RegexLexer):
    """
    For MOOCode (the MOO scripting language).

    .. versionadded:: 0.9
    """
    name = 'MOOCode'
    url = 'http://www.moo.mud.org/'
    filenames = ['*.moo']
    aliases = ['moocode', 'moo']
    mimetypes = ['text/x-moocode']

    # Single flat state; rule order matters (e.g. the function-call rule
    # must precede the catch-all variable rule).
    tokens = {
        'root': [
            # Numbers
            (r'(0|[1-9][0-9_]*)', Number.Integer),
            # Strings
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # exceptions -- only these two error codes are recognized
            (r'(E_PERM|E_DIV)', Name.Exception),
            # db-refs -- object numbers (#123, #-1) and $-prefixed corified names
            (r'((#[-0-9]+)|(\$\w+))', Name.Entity),
            # Keywords
            (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
             r'|endwhile|break|continue|return|try'
             r'|except|endtry|finally|in)\b', Keyword),
            # builtins
            (r'(random|length)', Name.Builtin),
            # special variables
            (r'(player|caller|this|args)', Name.Variable.Instance),
            # skip whitespace
            (r'\s+', Text),
            (r'\n', Text),
            # other operators
            (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator),
            # function call
            (r'(\w+)(\()', bygroups(Name.Function, Operator)),
            # variables
            (r'(\w+)', Text),
        ]
    }
+
+
class HybrisLexer(RegexLexer):
    """
    For Hybris source code.

    .. versionadded:: 1.4
    """

    name = 'Hybris'
    aliases = ['hybris', 'hy']
    filenames = ['*.hy', '*.hyb']
    mimetypes = ['text/x-hybris', 'application/x-hybris']

    # DOTALL lets the /* ... */ rule span multiple lines.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # method names: declaration keyword(s) + identifier + '('
            (r'^(\s*(?:function|method|operator\s+)+?)'
             r'([a-zA-Z_]\w*)'
             r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
            (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|'
             r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword),
            (r'(extends|private|protected|public|static|throws|function|method|'
             r'operator)\b', Keyword.Declaration),
            (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|'
             r'__INC_PATH__)\b', Keyword.Constant),
            # class/struct and import/include switch to sub-states that
            # consume exactly one following name.
            (r'(class|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(import|include)(\s+)',
             bygroups(Keyword.Namespace, Text), 'import'),
            # Built-in library functions.
            (words((
                'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold',
                'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32',
                'sha2', 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos',
                'cosh', 'exp', 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin',
                'sinh', 'sqrt', 'tan', 'tanh', 'isint', 'isfloat', 'ischar', 'isstring',
                'isarray', 'ismap', 'isalias', 'typeof', 'sizeof', 'toint', 'tostring',
                'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', 'var_names',
                'var_values', 'user_functions', 'dyn_functions', 'methods', 'call',
                'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks',
                'usleep', 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink',
                'dllcall', 'dllcall_argv', 'dllclose', 'env', 'exec', 'fork', 'getpid',
                'wait', 'popen', 'pclose', 'exit', 'kill', 'pthread_create',
                'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill',
                'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind',
                'listen', 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect',
                'server', 'recv', 'send', 'close', 'print', 'println', 'printf', 'input',
                'readline', 'serial_open', 'serial_fcntl', 'serial_get_attr',
                'serial_get_ispeed', 'serial_get_ospeed', 'serial_set_attr',
                'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', 'serial_read',
                'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
                'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir',
                'pcre_replace', 'size', 'pop', 'unmap', 'has', 'keys', 'values',
                'length', 'find', 'substr', 'replace', 'split', 'trim', 'remove',
                'contains', 'join'), suffix=r'\b'),
             Name.Builtin),
            # Standard-library types.
            (words((
                'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
                'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
                'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
             Keyword.Type),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
            # Attribute access after a dot.
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Text),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
    }

    def analyse_text(text):
        """public method and private method don't seem to be quite common
        elsewhere."""
        result = 0
        if re.search(r'\b(?:public|private)\s+method\b', text):
            result += 0.01
        return result
+
+
+
class EasytrieveLexer(RegexLexer):
    """
    Easytrieve Plus is a programming language for extracting, filtering and
    converting sequential data. Furthermore it can layout data for reports.
    It is mainly used on mainframe platforms and can access several of the
    mainframe's native file formats. It is somewhat comparable to awk.

    .. versionadded:: 2.1
    """
    name = 'Easytrieve'
    aliases = ['easytrieve']
    filenames = ['*.ezt', '*.mac']
    mimetypes = ['text/x-easytrieve']
    flags = 0

    # Note: We cannot use r'\b' at the start and end of keywords because
    # Easytrieve Plus delimiter characters are:
    #
    # * space ( )
    # * apostrophe (')
    # * period (.)
    # * comma (,)
    # * parenthesis ( and )
    # * colon (:)
    #
    # Additionally words end once a '*' appears, indicating a comment.
    _DELIMITERS = r' \'.,():\n'
    _DELIMITERS_OR_COMENT = _DELIMITERS + '*'
    _DELIMITER_PATTERN = '[' + _DELIMITERS + ']'
    _DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')'
    _NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMENT + ']'
    _OPERATORS_PATTERN = '[.+\\-/=\\[\\](){}<>;,&%¬]'
    _KEYWORDS = [
        'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR',
        'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU',
        'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR',
        'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D',
        'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI',
        'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE',
        'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF',
        'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12',
        'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21',
        'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30',
        'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7',
        'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST',
        'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT',
        'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT',
        'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY',
        'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE',
        'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES',
        'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE',
        'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT',
        'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1',
        'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER',
        'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT',
        'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT',
        'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT',
        'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE',
        'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT',
        'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM',
        'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT',
        'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME',
        'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC',
        'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE',
        'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST'
    ]

    tokens = {
        'root': [
            (r'\*.*\n', Comment.Single),
            (r'\n+', Whitespace),
            # Macro argument
            (r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable,
             'after_macro_argument'),
            # Macro call
            (r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable),
            (r'(FILE|MACRO|REPORT)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'after_declaration'),
            (r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')',
             bygroups(Keyword.Declaration, Operator)),
            (words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE),
             bygroups(Keyword.Reserved, Operator)),
            (_OPERATORS_PATTERN, Operator),
            # Procedure declaration
            (r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)',
             bygroups(Name.Function, Whitespace, Operator, Whitespace,
                      Keyword.Declaration, Whitespace)),
            (r'[0-9]+\.[0-9]*', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String),
            (r'\s+', Whitespace),
            # Everything else just belongs to a name
            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
        ],
        'after_declaration': [
            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
            default('#pop'),
        ],
        'after_macro_argument': [
            (r'\*.*\n', Comment.Single, '#pop'),
            (r'\s+', Whitespace, '#pop'),
            (_OPERATORS_PATTERN, Operator, '#pop'),
            (r"'(''|[^'])*'", String, '#pop'),
            # Everything else just belongs to a name
            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
        ],
    }
    _COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
    _MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO')

    def analyse_text(text):
        """
        Perform a structural analysis for basic Easytrieve constructs.

        Returns a rating between 0.0 and 1.0 based on the presence of
        typical Easytrieve statements (MACRO header, or PARM/FILE/JOB/
        PROC/END-PROC/REPORT lines in the expected order).
        """
        result = 0.0
        lines = text.split('\n')
        hasEndProc = False
        hasHeaderComment = False
        hasFile = False
        hasJob = False
        hasProc = False
        hasParm = False
        hasReport = False

        def isCommentLine(line):
            # Fixed: test the line passed in instead of always lines[0],
            # which made the helper useless for any other argument.
            return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None

        def isEmptyLine(line):
            return not bool(line.strip())

        # Remove possible empty lines and header comments.
        while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
            if not isEmptyLine(lines[0]):
                hasHeaderComment = True
            del lines[0]

        if not lines:
            # Only blank lines and comments: nothing to base a rating on.
            # (Previously this fell through to lines[0] and raised
            # IndexError.)
            return result

        if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
            # Looks like an Easytrieve macro.
            result = 0.4
            if hasHeaderComment:
                result += 0.4
        else:
            # Scan the source for lines starting with indicators.
            for line in lines:
                # Renamed from ``words`` to avoid shadowing the
                # pygments.lexer.words() helper used in the token table.
                parts = line.split()
                if len(parts) >= 2:
                    firstWord = parts[0]
                    if not hasReport:
                        if not hasJob:
                            if not hasFile:
                                if not hasParm:
                                    if firstWord == 'PARM':
                                        hasParm = True
                                if firstWord == 'FILE':
                                    hasFile = True
                            if firstWord == 'JOB':
                                hasJob = True
                        elif firstWord == 'PROC':
                            hasProc = True
                        elif firstWord == 'END-PROC':
                            hasEndProc = True
                        elif firstWord == 'REPORT':
                            hasReport = True

            # Weight the findings.
            if hasJob and (hasProc == hasEndProc):
                if hasHeaderComment:
                    result += 0.1
                if hasParm:
                    if hasProc:
                        # Found PARM, JOB and PROC/END-PROC:
                        # pretty sure this is Easytrieve.
                        result += 0.8
                    else:
                        # Found PARM and JOB: probably this is Easytrieve
                        result += 0.5
                else:
                    # Found JOB and possibly other keywords: might be Easytrieve
                    result += 0.11
                if hasParm:
                    # Note: PARM is not a proper English word, so this is
                    # regarded a much better indicator for Easytrieve than
                    # the other words.
                    result += 0.2
                if hasFile:
                    result += 0.01
                if hasReport:
                    result += 0.01
        assert 0.0 <= result <= 1.0
        return result
+
+
class JclLexer(RegexLexer):
    """
    Job Control Language (JCL)
    is a scripting language used on mainframe platforms to instruct the system
    on how to run a batch job or start a subsystem. It is somewhat
    comparable to MS DOS batch and Unix shell scripts.

    .. versionadded:: 2.1
    """
    name = 'JCL'
    aliases = ['jcl']
    filenames = ['*.jcl']
    mimetypes = ['text/x-jcl']
    # JCL statements are traditionally uppercase but accepted either way.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            # '//*' starts a comment card.
            (r'//\*.*\n', Comment.Single),
            # '//' starts a JCL statement card.
            (r'//', Keyword.Pseudo, 'statement'),
            # '/*' starts a JES2 control statement.
            (r'/\*', Keyword.Pseudo, 'jes2_statement'),
            # TODO: JES3 statement
            (r'.*\n', Other)  # Input text or inline code in any language.
        ],
        'statement': [
            (r'\s*\n', Whitespace, '#pop'),
            # Label followed by EXEC or JOB goes straight to the options.
            (r'([a-z]\w*)(\s+)(exec|job)(\s*)',
             bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace),
             'option'),
            # Any other label; the command keyword is handled next.
            (r'[a-z]\w*', Name.Variable, 'statement_command'),
            (r'\s+', Whitespace, 'statement_command'),
        ],
        'statement_command': [
            (r'\s+(command|cntl|dd|endctl|endif|else|include|jcllib|'
             r'output|pend|proc|set|then|xmit)\s+', Keyword.Reserved, 'option'),
            include('option')
        ],
        'jes2_statement': [
            (r'\s*\n', Whitespace, '#pop'),
            (r'\$', Keyword, 'option'),
            (r'\b(jobparam|message|netacct|notify|output|priority|route|'
             r'setup|signoff|xeq|xmit)\b', Keyword, 'option'),
        ],
        'option': [
            # (r'\n', Text, 'root'),
            (r'\*', Name.Builtin),
            (r'[\[\](){}<>;,]', Punctuation),
            (r'[-+*/=&%]', Operator),
            (r'[a-z_]\w*', Name),
            (r'\d+\.\d*', Number.Float),
            (r'\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r"'", String, 'option_string'),
            # Anything after blanks on an option card is a comment.
            (r'[ \t]+', Whitespace, 'option_comment'),
            (r'\.', Punctuation),
        ],
        'option_string': [
            # A quoted string may continue on the next card after '//'.
            (r"(\n)(//)", bygroups(Text, Keyword.Pseudo)),
            # Doubled apostrophe is an escaped apostrophe.
            (r"''", String),
            (r"[^']", String),
            (r"'", String, '#pop'),
        ],
        'option_comment': [
            # (r'\n', Text, 'root'),
            (r'.+', Comment.Single)
        ]
    }

    # A JOB card: '//' + 1-8 char job name + the JOB keyword.
    _JOB_HEADER_PATTERN = re.compile(r'^//[a-z#$@][a-z0-9#$@]{0,7}\s+job(\s+.*)?$',
                                     re.IGNORECASE)

    def analyse_text(text):
        """
        Recognize JCL job by header.
        """
        result = 0.0
        lines = text.split('\n')
        if len(lines) > 0:
            if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
                result = 1.0
        assert 0.0 <= result <= 1.0
        return result
+
+
class MiniScriptLexer(RegexLexer):
    """
    For MiniScript source code.

    .. versionadded:: 2.6
    """

    name = 'MiniScript'
    url = 'https://miniscript.org'
    aliases = ['miniscript', 'ms']
    filenames = ['*.ms']
    # NOTE(review): 'text/x-minicript' looks like a typo for
    # 'text/x-miniscript', but it is a published value -- kept as-is.
    mimetypes = ['text/x-minicript', 'application/x-miniscript']

    tokens = {
        'root': [
            # Optional shebang on the first line; everything else is 'base'.
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            ('//.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number),
            (r'(?i)\d+e[+-]?\d+', Number),
            (r'\d+', Number),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            (r'"', String, 'string_double'),
            (r'(==|!=|<=|>=|[=+\-*/%^<>.:])', Operator),
            (r'[;,\[\]{}()]', Punctuation),
            (words((
                'break', 'continue', 'else', 'end', 'for', 'function', 'if',
                'in', 'isa', 'then', 'repeat', 'return', 'while'), suffix=r'\b'),
             Keyword),
            (words((
                'abs', 'acos', 'asin', 'atan', 'ceil', 'char', 'cos', 'floor',
                'log', 'round', 'rnd', 'pi', 'sign', 'sin', 'sqrt', 'str', 'tan',
                'hasIndex', 'indexOf', 'len', 'val', 'code', 'remove', 'lower',
                'upper', 'replace', 'split', 'indexes', 'values', 'join', 'sum',
                'sort', 'shuffle', 'push', 'pop', 'pull', 'range',
                'print', 'input', 'time', 'wait', 'locals', 'globals', 'outer',
                'yield'), suffix=r'\b'),
             Name.Builtin),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(and|or|not|new)\b', Operator.Word),
            (r'(self|super|__isa)\b', Name.Builtin.Pseudo),
            # Catch-all for ordinary identifiers; must come last.
            (r'[a-zA-Z_]\w*', Name.Variable)
        ],
        'string_double': [
            (r'[^"\n]+', String),
            # Doubled quote is an escaped quote inside the string.
            (r'""', String),
            (r'"', String, '#pop'),
            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
        ]
    }
diff --git a/pygments/lexers/sgf.py b/pygments/lexers/sgf.py
new file mode 100644
index 0000000..865d55c
--- /dev/null
+++ b/pygments/lexers/sgf.py
@@ -0,0 +1,60 @@
+"""
+ pygments.lexers.sgf
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Smart Game Format (sgf) file format.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Name, Literal, String, Punctuation, Whitespace
+
+__all__ = ["SmartGameFormatLexer"]
+
+
class SmartGameFormatLexer(RegexLexer):
    """
    Lexer for Smart Game Format (sgf) file format.

    The format is used to store game records of board games for two players
    (mainly Go game).

    .. versionadded:: 2.4
    """
    name = 'SmartGameFormat'
    url = 'https://www.red-bean.com/sgf/'
    aliases = ['sgf']
    filenames = ['*.sgf']

    tokens = {
        'root': [
            (r'[():;]+', Punctuation),
            # property identifiers (FF[4] plus common extensions);
            # the original pattern listed 'SO' twice -- the redundant
            # alternative has been dropped (no behavior change).
            (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|'
             r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|'
             r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|'
             r'RO|RU|SO|SC|SE|SI|SL|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|'
             r'V|[BW]|C)',
             Name.Builtin),
            # number value, e.g. [6.5]
            (r'(\[)([0-9.]+)(\])',
             bygroups(Punctuation, Literal.Number, Punctuation)),
            # ISO date value, e.g. [2006-01-02]
            (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])',
             bygroups(Punctuation, Literal.Date, Punctuation)),
            # board point, e.g. [aa]
            (r'(\[)([a-z]{2})(\])',
             bygroups(Punctuation, String, Punctuation)),
            # compressed point pair, e.g. [aa:bb]
            (r'(\[)([a-z]{2})(:)([a-z]{2})(\])',
             bygroups(Punctuation, String, Punctuation, String, Punctuation)),
            # free-form text value
            (r'(\[)([\w\s#()+,\-.:?]+)(\])',
             bygroups(Punctuation, String, Punctuation)),
            (r'(\[)(\s.*)(\])',
             bygroups(Punctuation, Whitespace, Punctuation)),
            (r'\s+', Whitespace)
        ],
    }
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
new file mode 100644
index 0000000..d3fb8a1
--- /dev/null
+++ b/pygments/lexers/shell.py
@@ -0,0 +1,918 @@
+"""
+ pygments.lexers.shell
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various shells.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, \
+ include, default, this, using, words, line_re
+from pygments.token import Punctuation, Whitespace, \
+ Text, Comment, Operator, Keyword, Name, String, Number, Generic
+from pygments.util import shebang_matches
+
+__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
+ 'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer',
+ 'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer',
+ 'ExeclineLexer']
+
+
class BashLexer(RegexLexer):
    """
    Lexer for (ba|k|z|)sh shell scripts.

    .. versionadded:: 0.6
    """

    name = 'Bash'
    aliases = ['bash', 'sh', 'ksh', 'zsh', 'shell']
    filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
                 '*.exheres-0', '*.exlib', '*.zsh',
                 '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc',
                 '.kshrc', 'kshrc',
                 'PKGBUILD']
    mimetypes = ['application/x-sh', 'application/x-shellscript', 'text/x-shellscript']

    tokens = {
        'root': [
            include('basic'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
            include('interp'),
        ],
        # $-expansions: arithmetic, command substitution, parameter
        # expansion, plain variables, and a lone '$'
        'interp': [
            (r'\$\(\(', Keyword, 'math'),
            (r'\$\(', Keyword, 'paren'),
            (r'\$\{#?', String.Interpol, 'curly'),
            (r'\$[a-zA-Z_]\w*', Name.Variable),  # user variable
            (r'\$(?:\d+|[#$?!_*@-])', Name.Variable),  # builtin
            (r'\$', Text),
        ],
        # keywords, builtins, comments, escapes, assignments, heredocs
        'basic': [
            (r'\b(if|fi|else|while|in|do|done|for|then|return|function|case|'
             r'select|continue|until|esac|elif)(\s*)\b',
             bygroups(Keyword, Whitespace)),
            (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
             r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
             r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
             r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
             r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
             r'ulimit|umask|unalias|unset|wait)(?=[\s)`])',
             Name.Builtin),
            (r'\A#!.+\n', Comment.Hashbang),
            (r'#.*\n', Comment.Single),
            (r'\\[\w\W]', String.Escape),
            # variable assignment: NAME= or NAME+=
            (r'(\b\w+)(\s*)(\+?=)', bygroups(Name.Variable, Whitespace, Operator)),
            (r'[\[\]{}()=]', Operator),
            (r'<<<', Operator),  # here-string
            # heredoc up to the matching delimiter (backreference \2)
            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r'&&|\|\|', Operator),
        ],
        # strings, word separators, numbers and plain words
        'data': [
            (r'(?s)\$?"(\\.|[^"\\$])*"', String.Double),
            (r'"', String.Double, 'string'),
            (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r"(?s)'.*?'", String.Single),
            (r';', Punctuation),
            (r'&', Punctuation),
            (r'\|', Punctuation),
            (r'\s+', Whitespace),
            (r'\d+\b', Number),
            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
            (r'<', Text),
        ],
        # double-quoted string with interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
            include('interp'),
        ],
        # ${...} parameter expansion
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$\\]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        # $(...) command substitution
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        # $((...)) arithmetic expansion
        'math': [
            (r'\)\)', Keyword, '#pop'),
            (r'[-+*/%^|&]|\*\*|\|\|', Operator),
            (r'\d+#\d+', Number),  # base#number literal
            (r'\d+#(?! )', Number),
            (r'\d+', Number),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }

    def analyse_text(text):
        """Guess from a shell shebang or a leading '$ ' prompt.

        Falls through (returns None, i.e. no opinion) when neither
        matches.
        """
        if shebang_matches(text, r'(ba|z|)sh'):
            return 1
        if text.startswith('$ '):
            return 0.2
+
+
class SlurmBashLexer(BashLexer):
    """
    Lexer for (ba|k|z|)sh Slurm scripts.

    .. versionadded:: 2.4
    """

    name = 'Slurm'
    aliases = ['slurm', 'sbatch']
    filenames = ['*.sl']
    mimetypes = []
    # plain words that should highlight as builtins in Slurm scripts
    EXTRA_KEYWORDS = {'srun'}

    def get_tokens_unprocessed(self, text):
        """Post-process the Bash token stream: promote EXTRA_KEYWORDS
        words to builtins and '#SBATCH' comment lines to pseudo-keywords.
        """
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Text and value in self.EXTRA_KEYWORDS:
                token = Name.Builtin
            elif token is Comment.Single and 'SBATCH' in value:
                token = Keyword.Pseudo
            yield index, token, value
+
+
class ShellSessionBaseLexer(Lexer):
    """
    Base lexer for shell sessions.

    Subclasses provide:

    * ``_innerLexerCls`` -- lexer class applied to the command text
    * ``_ps1rgx`` -- compiled regex splitting a line into
      (primary prompt, command) groups
    * ``_ps2`` -- literal continuation prompt string
    * ``_bare_continuation`` -- when True, a line starting with ``_ps2``
      continues the previous command even without a trailing backslash

    .. versionadded:: 2.1
    """

    _bare_continuation = False
    # optional "(venv) "-style prefix in front of the prompt
    _venv = re.compile(r'^(\([^)]*\))(\s*)')

    def get_tokens_unprocessed(self, text):
        innerlexer = self._innerLexerCls(**self.options)

        pos = 0
        curcode = ''      # accumulated command text for the inner lexer
        insertions = []   # prompt tokens spliced into the inner stream
        backslash_continuation = False

        for match in line_re.finditer(text):
            line = match.group()

            # emit a virtualenv prefix as its own prompt token
            venv_match = self._venv.match(line)
            if venv_match:
                venv = venv_match.group(1)
                venv_whitespace = venv_match.group(2)
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt.VirtualEnv, venv)]))
                if venv_whitespace:
                    insertions.append((len(curcode),
                                       [(0, Text, venv_whitespace)]))
                line = line[venv_match.end():]

            m = self._ps1rgx.match(line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
                backslash_continuation = curcode.endswith('\\\n')
            elif backslash_continuation:
                # previous command ended in '\': this line belongs to it,
                # with or without a PS2 prompt in front
                if line.startswith(self._ps2):
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt,
                                         line[:len(self._ps2)])]))
                    curcode += line[len(self._ps2):]
                else:
                    curcode += line
                backslash_continuation = curcode.endswith('\\\n')
            elif self._bare_continuation and line.startswith(self._ps2):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt,
                                     line[:len(self._ps2)])]))
                curcode += line[len(self._ps2):]
            else:
                # plain output line: flush any pending command first
                if insertions:
                    toks = innerlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        # flush a trailing command that was not followed by output
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         innerlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v
+
+
class BashSessionLexer(ShellSessionBaseLexer):
    """
    Lexer for Bash shell sessions, i.e. command lines, including a
    prompt, interspersed with output.

    .. versionadded:: 1.1
    """

    name = 'Bash Session'
    aliases = ['console', 'shell-session']
    filenames = ['*.sh-session', '*.shell-session']
    mimetypes = ['application/x-shell-session', 'application/x-sh-session']

    _innerLexerCls = BashLexer
    # group 1: the prompt (optional [..]/(..) prefix, user@host forms,
    # ending in $, # or %); group 2: the command text
    _ps1rgx = re.compile(
        r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \
        r'?|\[\S+[@:][^\n]+\].+))\s*[$#%]\s*)(.*\n?)')
    _ps2 = '> '
+
+
class BatchLexer(RegexLexer):
    """
    Lexer for the DOS/Windows Batch file format.

    The grammar is built programmatically: most states exist in a plain
    variant and a ``.../compound`` variant used inside parenthesized
    blocks, where an unescaped ``)`` closes the construct.  The
    ``_make_*`` helpers below generate both variants from shared regex
    fragments; the fragments are passed in as default arguments because
    a class body is not a closure scope for nested functions.

    .. versionadded:: 0.7
    """
    name = 'Batchfile'
    aliases = ['batch', 'bat', 'dosbatch', 'winbatch']
    filenames = ['*.bat', '*.cmd']
    mimetypes = ['application/x-dos-batch']

    flags = re.MULTILINE | re.IGNORECASE

    # shared regex fragments; _nl also covers the DOS EOF char \x1a,
    # and '^' is cmd.exe's escape character throughout
    _nl = r'\n\x1a'
    _punct = r'&<>|'
    _ws = r'\t\v\f\r ,;=\xa0'
    _nlws = r'\s\x1a\xa0,;='
    _space = r'(?:(?:(?:\^[%s])?[%s])+)' % (_nl, _ws)
    _keyword_terminator = (r'(?=(?:\^[%s]?)?[%s+./:[\\\]]|[%s%s(])' %
                           (_nl, _ws, _nl, _punct))
    _token_terminator = r'(?=\^?[%s]|[%s%s])' % (_ws, _punct, _nl)
    _start_label = r'((?:(?<=^[^:])|^[^:]?)[%s]*)(:)' % _ws
    _label = r'(?:(?:[^%s%s+:^]|\^[%s]?[\w\W])*)' % (_nlws, _punct, _nl)
    _label_compound = r'(?:(?:[^%s%s+:^)]|\^[%s]?[^)])*)' % (_nlws, _punct, _nl)
    _number = r'(?:-?(?:0[0-7]+|0x[\da-f]+|\d+)%s)' % _token_terminator
    _opword = r'(?:equ|geq|gtr|leq|lss|neq)'
    _string = r'(?:"[^%s"]*(?:"|(?=[%s])))' % (_nl, _nl)
    # %var%, %~modifiers, substring/substitution syntax, and delayed
    # expansion !var! forms
    _variable = (r'(?:(?:%%(?:\*|(?:~[a-z]*(?:\$[^:]+:)?)?\d|'
                 r'[^%%:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:[^%%%s^]|'
                 r'\^[^%%%s])[^=%s]*=(?:[^%%%s^]|\^[^%%%s])*)?)?%%))|'
                 r'(?:\^?![^!:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:'
                 r'[^!%s^]|\^[^!%s])[^=%s]*=(?:[^!%s^]|\^[^!%s])*)?)?\^?!))' %
                 (_nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl))
    _core_token = r'(?:(?:(?:\^[%s]?)?[^"%s%s])+)' % (_nl, _nlws, _punct)
    _core_token_compound = r'(?:(?:(?:\^[%s]?)?[^"%s%s)])+)' % (_nl, _nlws, _punct)
    _token = r'(?:[%s]+|%s)' % (_punct, _core_token)
    _token_compound = r'(?:[%s]+|%s)' % (_punct, _core_token_compound)
    _stoken = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
               (_punct, _string, _variable, _core_token))

    def _make_begin_state(compound, _core_token=_core_token,
                          _core_token_compound=_core_token_compound,
                          _keyword_terminator=_keyword_terminator,
                          _nl=_nl, _punct=_punct, _string=_string,
                          _space=_space, _start_label=_start_label,
                          _stoken=_stoken, _token_terminator=_token_terminator,
                          _variable=_variable, _ws=_ws):
        # Build the start-of-statement state ('root' or 'root/compound'):
        # labels, command keywords, for/if/goto/set special forms.
        rest = '(?:%s|%s|[^"%%%s%s%s])*' % (_string, _variable, _nl, _punct,
                                            ')' if compound else '')
        rest_of_line = r'(?:(?:[^%s^]|\^[%s]?[\w\W])*)' % (_nl, _nl)
        rest_of_line_compound = r'(?:(?:[^%s^)]|\^[%s]?[^)])*)' % (_nl, _nl)
        set_space = r'((?:(?:\^[%s]?)?[^\S\n])*)' % _nl
        suffix = ''
        if compound:
            # inside (...), a lookahead-at-')' also terminates tokens
            _keyword_terminator = r'(?:(?=\))|%s)' % _keyword_terminator
            _token_terminator = r'(?:(?=\))|%s)' % _token_terminator
            suffix = '/compound'
        return [
            # in compound context ')' pops; otherwise a stray ')' starts
            # an old-style comment to end of line
            ((r'\)', Punctuation, '#pop') if compound else
             (r'\)((?=\()|%s)%s' % (_token_terminator, rest_of_line),
              Comment.Single)),
            (r'(?=%s)' % _start_label, Text, 'follow%s' % suffix),
            (_space, using(this, state='text')),
            include('redirect%s' % suffix),
            (r'[%s]+' % _nl, Text),
            (r'\(', Punctuation, 'root/compound'),
            (r'@+', Punctuation),
            # for/if/rem followed by /? (help request)
            (r'((?:for|if|rem)(?:(?=(?:\^[%s]?)?/)|(?:(?!\^)|'
             r'(?<=m))(?:(?=\()|%s)))(%s?%s?(?:\^[%s]?)?/(?:\^[%s]?)?\?)' %
             (_nl, _token_terminator, _space,
              _core_token_compound if compound else _core_token, _nl, _nl),
             bygroups(Keyword, using(this, state='text')),
             'follow%s' % suffix),
            (r'(goto%s)(%s(?:\^[%s]?)?/(?:\^[%s]?)?\?%s)' %
             (_keyword_terminator, rest, _nl, _nl, rest),
             bygroups(Keyword, using(this, state='text')),
             'follow%s' % suffix),
            # plain internal commands
            (words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy',
                    'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase',
                    'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move',
                    'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren',
                    'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time',
                    'title', 'type', 'ver', 'verify', 'vol'),
                   suffix=_keyword_terminator), Keyword, 'follow%s' % suffix),
            # call :label
            (r'(call)(%s?)(:)' % _space,
             bygroups(Keyword, using(this, state='text'), Punctuation),
             'call%s' % suffix),
            (r'call%s' % _keyword_terminator, Keyword),
            # for /f and for /l get dedicated states for their options
            (r'(for%s(?!\^))(%s)(/f%s)' %
             (_token_terminator, _space, _token_terminator),
             bygroups(Keyword, using(this, state='text'), Keyword),
             ('for/f', 'for')),
            (r'(for%s(?!\^))(%s)(/l%s)' %
             (_token_terminator, _space, _token_terminator),
             bygroups(Keyword, using(this, state='text'), Keyword),
             ('for/l', 'for')),
            (r'for%s(?!\^)' % _token_terminator, Keyword, ('for2', 'for')),
            (r'(goto%s)(%s?)(:?)' % (_keyword_terminator, _space),
             bygroups(Keyword, using(this, state='text'), Punctuation),
             'label%s' % suffix),
            # if [/i] [not] ...
            (r'(if(?:(?=\()|%s)(?!\^))(%s?)((?:/i%s)?)(%s?)((?:not%s)?)(%s?)' %
             (_token_terminator, _space, _token_terminator, _space,
              _token_terminator, _space),
             bygroups(Keyword, using(this, state='text'), Keyword,
                      using(this, state='text'), Keyword,
                      using(this, state='text')), ('(?', 'if')),
            # rem comments swallow the rest of the line
            (r'rem(((?=\()|%s)%s?%s?.*|%s%s)' %
             (_token_terminator, _space, _stoken, _keyword_terminator,
              rest_of_line_compound if compound else rest_of_line),
             Comment.Single, 'follow%s' % suffix),
            # set /a enters arithmetic mode
            (r'(set%s)%s(/a)' % (_keyword_terminator, set_space),
             bygroups(Keyword, using(this, state='text'), Keyword),
             'arithmetic%s' % suffix),
            # set [/p] NAME=...
            (r'(set%s)%s((?:/p)?)%s((?:(?:(?:\^[%s]?)?[^"%s%s^=%s]|'
             r'\^[%s]?[^"=])+)?)((?:(?:\^[%s]?)?=)?)' %
             (_keyword_terminator, set_space, set_space, _nl, _nl, _punct,
              ')' if compound else '', _nl, _nl),
             bygroups(Keyword, using(this, state='text'), Keyword,
                      using(this, state='text'), using(this, state='variable'),
                      Punctuation),
             'follow%s' % suffix),
            default('follow%s' % suffix)
        ]

    def _make_follow_state(compound, _label=_label,
                           _label_compound=_label_compound, _nl=_nl,
                           _space=_space, _start_label=_start_label,
                           _token=_token, _token_compound=_token_compound,
                           _ws=_ws):
        # Build the state for what may follow a command on the same line:
        # labels, redirections, command separators.
        suffix = '/compound' if compound else ''
        state = []
        if compound:
            state.append((r'(?=\))', Text, '#pop'))
        state += [
            (r'%s([%s]*)(%s)(.*)' %
             (_start_label, _ws, _label_compound if compound else _label),
             bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)),
            include('redirect%s' % suffix),
            (r'(?=[%s])' % _nl, Text, '#pop'),
            (r'\|\|?|&&?', Punctuation, '#pop'),
            include('text')
        ]
        return state

    def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct,
                               _string=_string, _variable=_variable,
                               _ws=_ws, _nlws=_nlws):
        # Build the state used after 'set /a'.
        op = r'=+\-*/!~'
        state = []
        if compound:
            state.append((r'(?=\))', Text, '#pop'))
        state += [
            (r'0[0-7]+', Number.Oct),
            (r'0x[\da-f]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'[(),]+', Punctuation),
            (r'([%s]|%%|\^\^)+' % op, Operator),
            (r'(%s|%s|(\^[%s]?)?[^()%s%%\^"%s%s]|\^[%s]?%s)+' %
             (_string, _variable, _nl, op, _nlws, _punct, _nlws,
              r'[^)]' if compound else r'[\w\W]'),
             using(this, state='variable')),
            (r'(?=[\x00|&])', Text, '#pop'),
            include('follow')
        ]
        return state

    def _make_call_state(compound, _label=_label,
                         _label_compound=_label_compound):
        # Build the state for the label argument of 'call :label'.
        state = []
        if compound:
            state.append((r'(?=\))', Text, '#pop'))
        state.append((r'(:?)(%s)' % (_label_compound if compound else _label),
                      bygroups(Punctuation, Name.Label), '#pop'))
        return state

    def _make_label_state(compound, _label=_label,
                          _label_compound=_label_compound, _nl=_nl,
                          _punct=_punct, _string=_string, _variable=_variable):
        # Build the state for a label definition; trailing text on the
        # line is treated as a comment.
        state = []
        if compound:
            state.append((r'(?=\))', Text, '#pop'))
        state.append((r'(%s?)((?:%s|%s|\^[%s]?%s|[^"%%^%s%s%s])*)' %
                      (_label_compound if compound else _label, _string,
                       _variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
                       _punct, r')' if compound else ''),
                      bygroups(Name.Label, Comment.Single), '#pop'))
        return state

    def _make_redirect_state(compound,
                             _core_token_compound=_core_token_compound,
                             _nl=_nl, _punct=_punct, _stoken=_stoken,
                             _string=_string, _space=_space,
                             _variable=_variable, _nlws=_nlws):
        # Build the state for redirections: handle duplication (2>&1)
        # and file redirection (>, >>, <).
        stoken_compound = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
                           (_punct, _string, _variable, _core_token_compound))
        return [
            (r'((?:(?<=[%s])\d)?)(>>?&|<&)([%s]*)(\d)' %
             (_nlws, _nlws),
             bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
            (r'((?:(?<=[%s])(?<!\^[%s])\d)?)(>>?|<)(%s?%s)' %
             (_nlws, _nl, _space, stoken_compound if compound else _stoken),
             bygroups(Number.Integer, Punctuation, using(this, state='text')))
        ]

    tokens = {
        # every generated state comes in a plain and a /compound variant
        'root': _make_begin_state(False),
        'follow': _make_follow_state(False),
        'arithmetic': _make_arithmetic_state(False),
        'call': _make_call_state(False),
        'label': _make_label_state(False),
        'redirect': _make_redirect_state(False),
        'root/compound': _make_begin_state(True),
        'follow/compound': _make_follow_state(True),
        'arithmetic/compound': _make_arithmetic_state(True),
        'call/compound': _make_call_state(True),
        'label/compound': _make_label_state(True),
        'redirect/compound': _make_redirect_state(True),
        'variable-or-escape': [
            (_variable, Name.Variable),
            (r'%%%%|\^[%s]?(\^!|[\w\W])' % _nl, String.Escape)
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            (_variable, Name.Variable),
            (r'\^!|%%', String.Escape),
            (r'[^"%%^%s]+|[%%^]' % _nl, String.Double),
            default('#pop')
        ],
        'sqstring': [
            include('variable-or-escape'),
            (r'[^%]+|%', String.Single)
        ],
        'bqstring': [
            include('variable-or-escape'),
            (r'[^%]+|%', String.Backtick)
        ],
        'text': [
            (r'"', String.Double, 'string'),
            include('variable-or-escape'),
            (r'[^"%%^%s%s\d)]+|.' % (_nlws, _punct), Text)
        ],
        'variable': [
            (r'"', String.Double, 'string'),
            include('variable-or-escape'),
            (r'[^"%%^%s]+|.' % _nl, Name.Variable)
        ],
        'for': [
            (r'(%s)(in)(%s)(\()' % (_space, _space),
             bygroups(using(this, state='text'), Keyword,
                      using(this, state='text'), Punctuation), '#pop'),
            include('follow')
        ],
        'for2': [
            (r'\)', Punctuation),
            (r'(%s)(do%s)' % (_space, _token_terminator),
             bygroups(using(this, state='text'), Keyword), '#pop'),
            (r'[%s]+' % _nl, Text),
            include('follow')
        ],
        # 'for /f' option strings in ", ' or ` quotes
        'for/f': [
            (r'(")((?:%s|[^"])*?")([%s]*)(\))' % (_variable, _nlws),
             bygroups(String.Double, using(this, state='string'), Text,
                      Punctuation)),
            (r'"', String.Double, ('#pop', 'for2', 'string')),
            (r"('(?:%%%%|%s|[\w\W])*?')([%s]*)(\))" % (_variable, _nlws),
             bygroups(using(this, state='sqstring'), Text, Punctuation)),
            (r'(`(?:%%%%|%s|[\w\W])*?`)([%s]*)(\))' % (_variable, _nlws),
             bygroups(using(this, state='bqstring'), Text, Punctuation)),
            include('for2')
        ],
        'for/l': [
            (r'-?\d+', Number.Integer),
            include('for2')
        ],
        # 'if' condition forms: errorlevel/cmdextversion, defined,
        # exist, numeric comparison, then string comparison in 'if2'
        'if': [
            (r'((?:cmdextversion|errorlevel)%s)(%s)(\d+)' %
             (_token_terminator, _space),
             bygroups(Keyword, using(this, state='text'),
                      Number.Integer), '#pop'),
            (r'(defined%s)(%s)(%s)' % (_token_terminator, _space, _stoken),
             bygroups(Keyword, using(this, state='text'),
                      using(this, state='variable')), '#pop'),
            (r'(exist%s)(%s%s)' % (_token_terminator, _space, _stoken),
             bygroups(Keyword, using(this, state='text')), '#pop'),
            (r'(%s%s)(%s)(%s%s)' % (_number, _space, _opword, _space, _number),
             bygroups(using(this, state='arithmetic'), Operator.Word,
                      using(this, state='arithmetic')), '#pop'),
            (_stoken, using(this, state='text'), ('#pop', 'if2')),
        ],
        'if2': [
            (r'(%s?)(==)(%s?%s)' % (_space, _space, _stoken),
             bygroups(using(this, state='text'), Operator,
                      using(this, state='text')), '#pop'),
            (r'(%s)(%s)(%s%s)' % (_space, _opword, _space, _stoken),
             bygroups(using(this, state='text'), Operator.Word,
                      using(this, state='text')), '#pop')
        ],
        # after 'if cond': optionally a '(...)' block, then maybe 'else'
        '(?': [
            (_space, using(this, state='text')),
            (r'\(', Punctuation, ('#pop', 'else?', 'root/compound')),
            default('#pop')
        ],
        'else?': [
            (_space, using(this, state='text')),
            (r'else%s' % _token_terminator, Keyword, '#pop'),
            default('#pop')
        ]
    }
+
+
class MSDOSSessionLexer(ShellSessionBaseLexer):
    """
    Lexer for MS DOS shell sessions, i.e. command lines, including a
    prompt, interspersed with output.

    .. versionadded:: 2.1
    """

    name = 'MSDOS Session'
    aliases = ['doscon']
    filenames = []
    mimetypes = []

    _innerLexerCls = BatchLexer
    # group 1: everything up to and including the first '>' (the prompt),
    # group 2: the command text
    _ps1rgx = re.compile(r'^([^>]*>)(.*\n?)')
    _ps2 = 'More? '
+
+
class TcshLexer(RegexLexer):
    """
    Lexer for tcsh scripts.

    .. versionadded:: 0.10
    """

    name = 'Tcsh'
    aliases = ['tcsh', 'csh']
    filenames = ['*.tcsh', '*.csh']
    mimetypes = ['application/x-csh']

    tokens = {
        'root': [
            include('basic'),
            (r'\$\(', Keyword, 'paren'),
            (r'\$\{#?', Keyword, 'curly'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
        ],
        'basic': [
            (r'\b(if|endif|else|while|then|foreach|case|default|'
             r'continue|goto|breaksw|end|switch|endsw)\s*\b',
             Keyword),
            # builtin commands; the original alternation listed
            # 'popd|pushd', 'shift' and 'source|suspend' twice -- the
            # redundant duplicates were removed (no behavior change,
            # alternation matches identically).
            (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
             r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
             r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
             r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
             r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|'
             r'set|shift|sched|setenv|setpath|settc|setty|setxvers|'
             r'source|stop|suspend|telltc|time|'
             r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
             r'ver|wait|warp|watchlog|where|which)\s*\b',
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]+', Operator),
            # heredoc up to the matching delimiter (backreference \2)
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            # NOTE(review): this Number rule is shadowed by the catch-all
            # above (digits match it first); kept as-is to preserve the
            # existing token output -- confirm before reordering.
            (r'\d+(?= |\Z)', Number),
            (r'\$#?(\w+|.)', Name.Variable),
        ],
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }
+
+
class TcshSessionLexer(ShellSessionBaseLexer):
    """
    Lexer for Tcsh sessions, i.e. command lines, including a
    prompt, interspersed with output.

    .. versionadded:: 2.1
    """

    name = 'Tcsh Session'
    aliases = ['tcshcon']
    filenames = []
    mimetypes = []

    _innerLexerCls = TcshLexer
    # group 1: everything up to and including the first '>' (the prompt),
    # group 2: the command text
    _ps1rgx = re.compile(r'^([^>]+>)(.*\n?)')
    _ps2 = '? '
+
+
class PowerShellLexer(RegexLexer):
    """
    For Windows PowerShell code.

    .. versionadded:: 1.5
    """
    name = 'PowerShell'
    aliases = ['powershell', 'pwsh', 'posh', 'ps1', 'psm1']
    filenames = ['*.ps1', '*.psm1']
    mimetypes = ['text/x-powershell']

    flags = re.DOTALL | re.IGNORECASE | re.MULTILINE

    # language keywords plus common parameter-attribute names
    keywords = (
        'while validateset validaterange validatepattern validatelength '
        'validatecount until trap switch return ref process param parameter in '
        'if global: local: function foreach for finally filter end elseif else '
        'dynamicparam do default continue cmdletbinding break begin alias \\? '
        '% #script #private #local #global mandatory parametersetname position '
        'valuefrompipeline valuefrompipelinebypropertyname '
        'valuefromremainingarguments helpmessage try catch throw').split()

    # comparison/logical operators, written with a leading '-' in source
    operators = (
        'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
        'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
        'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
        'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
        'lt match ne not notcontains notlike notmatch or regex replace '
        'wildcard').split()

    # approved verbs of Verb-Noun cmdlet names
    verbs = (
        'write where watch wait use update unregister unpublish unprotect '
        'unlock uninstall undo unblock trace test tee take sync switch '
        'suspend submit stop step start split sort skip show set send select '
        'search scroll save revoke resume restore restart resolve resize '
        'reset request repair rename remove register redo receive read push '
        'publish protect pop ping out optimize open new move mount merge '
        'measure lock limit join invoke install initialize import hide group '
        'grant get format foreach find export expand exit enter enable edit '
        'dismount disconnect disable deny debug cxnew copy convertto '
        'convertfrom convert connect confirm compress complete compare close '
        'clear checkpoint block backup assert approve aggregate add').split()

    # built-in cmdlet aliases (trailing underscore avoids clashing with
    # the Lexer class attribute 'aliases')
    aliases_ = (
        'ac asnp cat cd cfs chdir clc clear clhy cli clp cls clv cnsn '
        'compare copy cp cpi cpp curl cvpa dbp del diff dir dnsn ebp echo epal '
        'epcsv epsn erase etsn exsn fc fhx fl foreach ft fw gal gbp gc gci gcm '
        'gcs gdr ghy gi gjb gl gm gmo gp gps gpv group gsn gsnp gsv gu gv gwmi '
        'h history icm iex ihy ii ipal ipcsv ipmo ipsn irm ise iwmi iwr kill lp '
        'ls man md measure mi mount move mp mv nal ndr ni nmo npssc nsn nv ogv '
        'oh popd ps pushd pwd r rbp rcjb rcsn rd rdr ren ri rjb rm rmdir rmo '
        'rni rnp rp rsn rsnp rujb rv rvpa rwmi sajb sal saps sasv sbp sc select '
        'set shcm si sl sleep sls sort sp spjb spps spsv start sujb sv swmi tee '
        'trcm type wget where wjb write').split()

    # comment-based help section names (.SYNOPSIS, .EXAMPLE, ...)
    commenthelp = (
        'component description example externalhelp forwardhelpcategory '
        'forwardhelptargetname functionality inputs link '
        'notes outputs parameter remotehelprunspace role synopsis').split()

    tokens = {
        'root': [
            # we need to count pairs of parentheses for correct highlight
            # of '$(...)' blocks in strings
            (r'\(', Punctuation, 'child'),
            (r'\s+', Text),
            (r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
             bygroups(Comment, String.Doc, Comment)),
            (r'#[^\n]*?$', Comment),
            # '&lt;' alternative tolerates HTML-escaped input
            (r'(&lt;|<)#', Comment.Multiline, 'multline'),
            (r'@"\n', String.Heredoc, 'heredoc-double'),
            (r"@'\n.*?\n'@", String.Heredoc),
            # escaped syntax
            (r'`[\'"$@-]', Punctuation),
            (r'"', String.Double, 'string'),
            (r"'([^']|'')*'", String.Single),
            (r'(\$|@@|@)((global|script|private|env):)?\w+',
             Name.Variable),
            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'-(%s)\b' % '|'.join(operators), Operator),
            (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin),
            (r'(%s)\s' % '|'.join(aliases_), Name.Builtin),
            (r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant),  # .net [type]s
            (r'-[a-z_]\w*', Name),
            (r'\w+', Name),
            (r'[.,;:@{}\[\]$()=+*/\\&%!~?^`|<>-]', Punctuation),
        ],
        'child': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        # NOTE: state name 'multline' (sic) is historical
        'multline': [
            (r'[^#&.]+', Comment.Multiline),
            (r'#(>|&gt;)', Comment.Multiline, '#pop'),
            (r'\.(%s)' % '|'.join(commenthelp), String.Doc),
            (r'[#&.]', Comment.Multiline),
        ],
        'string': [
            (r"`[0abfnrtv'\"$`]", String.Escape),
            (r'[^$`"]+', String.Double),
            (r'\$\(', Punctuation, 'child'),
            (r'""', String.Double),
            (r'[`$]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'heredoc-double': [
            (r'\n"@', String.Heredoc, '#pop'),
            (r'\$\(', Punctuation, 'child'),
            # NOTE(review): the trailing '"]' in this pattern looks
            # unintentional (it only matches runs ending in '"]'); kept
            # as-is to preserve output -- confirm against upstream.
            (r'[^@\n]+"]', String.Heredoc),
            (r".", String.Heredoc),
        ]
    }
+
+
class PowerShellSessionLexer(ShellSessionBaseLexer):
    """
    Lexer for PowerShell sessions, i.e. command lines, including a
    prompt, interspersed with output.

    .. versionadded:: 2.1
    """

    name = 'PowerShell Session'
    aliases = ['pwsh-session', 'ps1con']
    filenames = []
    mimetypes = []

    _innerLexerCls = PowerShellLexer
    # PowerShell continues multi-line constructs with a bare '>> ' prompt,
    # no trailing backslash required
    _bare_continuation = True
    # group 1: optional '[host]: ' prefix then 'PS...>' prompt,
    # group 2: the command text
    _ps1rgx = re.compile(r'^((?:\[[^]]+\]: )?PS[^>]*> ?)(.*\n?)')
    _ps2 = '> '
+
+
class FishShellLexer(RegexLexer):
    """
    Lexer for Fish shell scripts.

    .. versionadded:: 2.1
    """

    name = 'Fish'
    aliases = ['fish', 'fishshell']
    filenames = ['*.fish', '*.load']
    mimetypes = ['application/x-fish']

    tokens = {
        'root': [
            include('basic'),
            include('data'),
            include('interp'),
        ],
        # $-expansions and (...) command substitution
        'interp': [
            (r'\$\(\(', Keyword, 'math'),
            (r'\(', Keyword, 'paren'),
            (r'\$#?(\w+|.)', Name.Variable),
        ],
        # keywords, builtins, comments, escapes, assignments, heredocs
        'basic': [
            (r'\b(begin|end|if|else|while|break|for|in|return|function|block|'
             r'case|continue|switch|not|and|or|set|echo|exit|pwd|true|false|'
             r'cd|count|test)(\s*)\b',
             bygroups(Keyword, Text)),
            (r'\b(alias|bg|bind|breakpoint|builtin|command|commandline|'
             r'complete|contains|dirh|dirs|emit|eval|exec|fg|fish|fish_config|'
             r'fish_indent|fish_pager|fish_prompt|fish_right_prompt|'
             r'fish_update_completions|fishd|funced|funcsave|functions|help|'
             r'history|isatty|jobs|math|mimedb|nextd|open|popd|prevd|psub|'
             r'pushd|random|read|set_color|source|status|trap|type|ulimit|'
             r'umask|vared|fc|getopts|hash|kill|printf|time|wait)\s*\b(?!\.)',
             Name.Builtin),
            (r'#.*\n', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
            (r'[\[\]()=]', Operator),
            # heredoc up to the matching delimiter (backreference \2)
            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
        ],
        # strings, separators, numbers and plain words
        'data': [
            (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
            (r'"', String.Double, 'string'),
            (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r"(?s)'.*?'", String.Single),
            (r';', Punctuation),
            (r'&|\||\^|<|>', Operator),
            (r'\s+', Text),
            (r'\d+(?= |\Z)', Number),
            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
        ],
        # double-quoted string with interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
            include('interp'),
        ],
        # (...) command substitution
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        # $((...)) arithmetic
        'math': [
            (r'\)\)', Keyword, '#pop'),
            (r'[-+*/%^|&]|\*\*|\|\|', Operator),
            (r'\d+#\d+', Number),  # base#number literal
            (r'\d+#(?! )', Number),
            (r'\d+', Number),
            include('root'),
        ],
    }
+
class ExeclineLexer(RegexLexer):
    """
    Lexer for Laurent Bercot's execline language
    (https://skarnet.org/software/execline).

    .. versionadded:: 2.7
    """

    name = 'execline'
    aliases = ['execline']
    filenames = ['*.exec']

    tokens = {
        'root': [
            include('basic'),
            include('data'),
            include('interp')
        ],
        # ${...} substitution and $variables
        'interp': [
            (r'\$\{', String.Interpol, 'curly'),
            (r'\$[\w@#]+', Name.Variable),  # user variable
            (r'\$', Text),
        ],
        # execline program names, shebang and comments
        'basic': [
            (r'\b(background|backtick|cd|define|dollarat|elgetopt|'
             r'elgetpositionals|elglob|emptyenv|envfile|exec|execlineb|'
             r'exit|export|fdblock|fdclose|fdmove|fdreserve|fdswap|'
             r'forbacktickx|foreground|forstdin|forx|getcwd|getpid|heredoc|'
             r'homeof|if|ifelse|ifte|ifthenelse|importas|loopwhilex|'
             r'multidefine|multisubstitute|pipeline|piperw|posix-cd|'
             r'redirfd|runblock|shift|trap|tryexec|umask|unexport|wait|'
             r'withstdinas)\b', Name.Builtin),
            (r'\A#!.+\n', Comment.Hashbang),
            (r'#.*\n', Comment.Single),
            (r'[{}]', Operator)  # block braces
        ],
        'data': [
            (r'(?s)"(\\.|[^"\\$])*"', String.Double),
            (r'"', String.Double, 'string'),
            (r'\s+', Text),
            (r'[^\s{}$"\\]+', Text)
        ],
        # double-quoted string with interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)(\\\\|\\.|[^"\\$])+', String.Double),
            include('interp'),
        ],
        # ${...} substitution body
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r'[\w#@]+', Name.Variable),
            include('root')
        ]

    }

    def analyse_text(text):
        """Guess from an execlineb shebang; no opinion otherwise."""
        if shebang_matches(text, r'execlineb'):
            return 1
diff --git a/pygments/lexers/sieve.py b/pygments/lexers/sieve.py
new file mode 100644
index 0000000..ab43db8
--- /dev/null
+++ b/pygments/lexers/sieve.py
@@ -0,0 +1,78 @@
+"""
+ pygments.lexers.sieve
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Sieve file format.
+
+ https://tools.ietf.org/html/rfc5228
+ https://tools.ietf.org/html/rfc5173
+ https://tools.ietf.org/html/rfc5229
+ https://tools.ietf.org/html/rfc5230
+ https://tools.ietf.org/html/rfc5232
+ https://tools.ietf.org/html/rfc5235
+ https://tools.ietf.org/html/rfc5429
+ https://tools.ietf.org/html/rfc8580
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Name, Literal, String, Text, Punctuation, \
+ Keyword
+
+__all__ = ["SieveLexer"]
+
+
class SieveLexer(RegexLexer):
    """
    Lexer for sieve format.

    Covers the base language (RFC 5228) and the tags/tests/actions added
    by the common extensions (RFCs 5173, 5229, 5230, 5232, 5235, 5429,
    8580).

    .. versionadded:: 2.6
    """
    name = 'Sieve'
    filenames = ['*.siv', '*.sieve']
    aliases = ['sieve']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[();,{}\[\]]', Punctuation),
            # import:
            (r'(?i)require',
             Keyword.Namespace),
            # tags:
            (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|'
             r'count|days|detail|domain|fcc|flags|from|handle|importance|is|'
             r'localpart|length|lowerfirst|lower|matches|message|mime|options|'
             r'over|percent|quotewildcard|raw|regex|specialuse|subject|text|'
             r'under|upperfirst|upper|value)',
             bygroups(Name.Tag, Name.Tag)),
            # tokens (tests, actions, constants):
            (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|'
             r'ereject|exists|false|fileinto|if|hasflag|header|keep|'
             r'notify_method_capability|notify|not|redirect|reject|removeflag|'
             r'setflag|size|spamtest|stop|string|true|vacation|virustest)',
             Name.Builtin),
            (r'(?i)set',
             Keyword.Declaration),
            # number with optional K/M/G size suffix:
            (r'([0-9.]+)([kmgKMG])?',
             bygroups(Literal.Number, Literal.Number)),
            # comment:
            (r'#.*$',
             Comment.Single),
            # Bracketed comments (RFC 5228, section 2.3) may span multiple
            # lines, so match with DOTALL; non-greedy so two comments on
            # one line are not merged into a single token.
            (r'(?s)/\*.*?\*/',
             Comment.Multiline),
            # string:
            (r'"[^"]*?"',
             String),
            # multi-line text block, terminated by a lone dot:
            (r'text:',
             Name.Tag, 'text'),
        ],
        'text': [
            (r'[^.].*?\n', String),
            (r'^\.', Punctuation, "#pop"),
        ]
    }
diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py
new file mode 100644
index 0000000..e3434c5
--- /dev/null
+++ b/pygments/lexers/slash.py
@@ -0,0 +1,184 @@
+"""
+ pygments.lexers.slash
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming
+ language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer
+from pygments.token import Name, Number, String, Comment, Punctuation, \
+ Other, Keyword, Operator, Whitespace
+
+__all__ = ['SlashLexer']
+
+
class SlashLanguageLexer(ExtendedRegexLexer):
    # Lexer for the Slash code itself (inside <% ... %> templates); plain
    # template text in 'root' is passed through as Other so the delegating
    # SlashLexer can hand it to an HTML lexer.

    # lookahead: next char is not an identifier char (keyword boundary)
    _nkw = r'(?=[^a-zA-Z_0-9])'

    def move_state(new_state):
        # Replace the current state on the stack rather than nesting.
        return ("#pop", new_state)

    def right_angle_bracket(lexer, match, ctx):
        # '}' always tokenizes as String.Interpol; if we are inside a
        # "#{...}" string interpolation, also pop back into the string.
        if len(ctx.stack) > 1 and ctx.stack[-2] == "string":
            ctx.stack.pop()
        yield match.start(), String.Interpol, '}'
        ctx.pos = match.end()
        pass

    tokens = {
        "root": [
            (r"<%=", Comment.Preproc, move_state("slash")),
            (r"<%!!", Comment.Preproc, move_state("slash")),
            (r"<%#.*?%>", Comment.Multiline),
            (r"<%", Comment.Preproc, move_state("slash")),
            (r".|\n", Other),
        ],
        "string": [
            (r"\\", String.Escape, move_state("string_e")),
            (r"\"", String, move_state("slash")),
            (r"#\{", String.Interpol, "slash"),
            (r'.|\n', String),
        ],
        # single escaped character after a backslash in a string
        "string_e": [
            (r'n', String.Escape, move_state("string")),
            (r't', String.Escape, move_state("string")),
            (r'r', String.Escape, move_state("string")),
            (r'e', String.Escape, move_state("string")),
            (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")),
            (r'.', String.Escape, move_state("string")),
        ],
        "regexp": [
            (r'}[a-z]*', String.Regex, move_state("slash")),
            (r'\\(.|\n)', String.Regex),
            (r'{', String.Regex, "regexp_r"),
            (r'.|\n', String.Regex),
        ],
        # nested braces inside a %r{...} regexp literal
        "regexp_r": [
            (r'}[a-z]*', String.Regex, "#pop"),
            (r'\\(.|\n)', String.Regex),
            (r'{', String.Regex, "regexp_r"),
        ],
        "slash": [
            (r"%>", Comment.Preproc, move_state("root")),
            (r"\"", String, move_state("string")),
            (r"'[a-zA-Z0-9_]+", String),
            (r'%r{', String.Regex, move_state("regexp")),
            (r'/\*.*?\*/', Comment.Multiline),
            (r"(#|//).*?\n", Comment.Single),
            (r'-?[0-9]+e[+-]?[0-9]+', Number.Float),
            (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
            (r'-?[0-9]+', Number.Integer),
            (r'nil'+_nkw, Name.Builtin),
            (r'true'+_nkw, Name.Builtin),
            (r'false'+_nkw, Name.Builtin),
            (r'self'+_nkw, Name.Builtin),
            (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'class'+_nkw, Keyword),
            (r'extends'+_nkw, Keyword),
            # method definitions, including operator methods
            (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
             bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),
            (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'def'+_nkw, Keyword),
            (r'if'+_nkw, Keyword),
            (r'elsif'+_nkw, Keyword),
            (r'else'+_nkw, Keyword),
            (r'unless'+_nkw, Keyword),
            (r'for'+_nkw, Keyword),
            (r'in'+_nkw, Keyword),
            (r'while'+_nkw, Keyword),
            (r'until'+_nkw, Keyword),
            (r'and'+_nkw, Keyword),
            (r'or'+_nkw, Keyword),
            (r'not'+_nkw, Keyword),
            (r'lambda'+_nkw, Keyword),
            (r'try'+_nkw, Keyword),
            (r'catch'+_nkw, Keyword),
            (r'return'+_nkw, Keyword),
            (r'next'+_nkw, Keyword),
            (r'last'+_nkw, Keyword),
            (r'throw'+_nkw, Keyword),
            (r'use'+_nkw, Keyword),
            (r'switch'+_nkw, Keyword),
            (r'\\', Keyword),
            (r'λ', Keyword),
            (r'__FILE__'+_nkw, Name.Builtin.Pseudo),
            (r'__LINE__'+_nkw, Name.Builtin.Pseudo),
            (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant),
            (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name),
            (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance),
            (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class),
            (r'\(', Punctuation),
            (r'\)', Punctuation),
            (r'\[', Punctuation),
            (r'\]', Punctuation),
            (r'\{', Punctuation),
            # '}' may close a string interpolation — handled by callback
            (r'\}', right_angle_bracket),
            (r';', Punctuation),
            (r',', Punctuation),
            # operators, longest forms first
            (r'<<=', Operator),
            (r'>>=', Operator),
            (r'<<', Operator),
            (r'>>', Operator),
            (r'==', Operator),
            (r'!=', Operator),
            (r'=>', Operator),
            (r'=', Operator),
            (r'<=>', Operator),
            (r'<=', Operator),
            (r'>=', Operator),
            (r'<', Operator),
            (r'>', Operator),
            (r'\+\+', Operator),
            (r'\+=', Operator),
            (r'-=', Operator),
            (r'\*\*=', Operator),
            (r'\*=', Operator),
            (r'\*\*', Operator),
            (r'\*', Operator),
            (r'/=', Operator),
            (r'\+', Operator),
            (r'-', Operator),
            (r'/', Operator),
            (r'%=', Operator),
            (r'%', Operator),
            (r'^=', Operator),
            (r'&&=', Operator),
            (r'&=', Operator),
            (r'&&', Operator),
            (r'&', Operator),
            (r'\|\|=', Operator),
            (r'\|=', Operator),
            (r'\|\|', Operator),
            (r'\|', Operator),
            (r'!', Operator),
            (r'\.\.\.', Operator),
            (r'\.\.', Operator),
            (r'\.', Operator),
            (r'::', Operator),
            (r':', Operator),
            (r'(\s|\n)+', Whitespace),
            (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable),
        ],
    }
+
+
class SlashLexer(DelegatingLexer):
    """
    Lexer for the Slash programming language: delegates between an HTML
    lexer for the template text and ``SlashLanguageLexer`` for the
    embedded Slash code.

    .. versionadded:: 2.4
    """

    name = 'Slash'
    aliases = ['slash']
    filenames = ['*.sla']

    def __init__(self, **options):
        # Imported locally to avoid importing the web lexers at module
        # load time.
        from pygments.lexers.web import HtmlLexer
        root_lexer, language_lexer = HtmlLexer, SlashLanguageLexer
        super().__init__(root_lexer, language_lexer, **options)
diff --git a/pygments/lexers/smalltalk.py b/pygments/lexers/smalltalk.py
new file mode 100644
index 0000000..ca41c07
--- /dev/null
+++ b/pygments/lexers/smalltalk.py
@@ -0,0 +1,196 @@
+"""
+ pygments.lexers.smalltalk
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Smalltalk and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['SmalltalkLexer', 'NewspeakLexer']
+
+
class SmalltalkLexer(RegexLexer):
    """
    For Smalltalk syntax.
    Contributed by Stefan Matthias Aust.
    Rewritten by Nils Winter.

    .. versionadded:: 0.10
    """
    name = 'Smalltalk'
    url = 'http://www.smalltalk.org/'
    filenames = ['*.st']
    aliases = ['smalltalk', 'squeak', 'st']
    mimetypes = ['text/x-smalltalk']

    tokens = {
        'root': [
            # pragma-style annotations: <keyword: value>
            (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
            include('squeak fileout'),
            include('whitespaces'),
            include('method definition'),
            # temporary variable declarations: | a b c |
            (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
            include('objects'),
            # return (^), assignment (:= or _)
            (r'\^|\:=|\_', Operator),
            # remaining punctuation
            (r'[\]({}.;!]', Text),
        ],
        'method definition': [
            # Not perfect: can't allow whitespaces at the beginning and the
            # end without breaking everything
            (r'([a-zA-Z]+\w*:)(\s*)(\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
            (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
             bygroups(Name.Function, Text, Name.Variable, Text)),
        ],
        # block argument list: [:a :b | ...]
        'blockvariables': [
            include('whitespaces'),
            (r'(:)(\s*)(\w+)',
             bygroups(Operator, Text, Name.Variable)),
            (r'\|', Operator, '#pop'),
            default('#pop'),  # else pop
        ],
        'literals': [
            (r"'(''|[^'])*'", String, 'afterobject'),
            (r'\$.', String.Char, 'afterobject'),
            (r'#\(', String.Symbol, 'parenth'),
            (r'\)', Text, 'afterobject'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
        ],
        # shared rules for literal arrays #( ... )
        '_parenth_helper': [
            include('whitespaces'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
            (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
            # literals
            (r"'(''|[^'])*'", String),
            (r'\$.', String.Char),
            (r'#*\(', String.Symbol, 'inner_parenth'),
        ],
        'parenth': [
            # This state is a bit tricky since
            # we can't just pop this state
            (r'\)', String.Symbol, ('root', 'afterobject')),
            include('_parenth_helper'),
        ],
        'inner_parenth': [
            (r'\)', String.Symbol, '#pop'),
            include('_parenth_helper'),
        ],
        'whitespaces': [
            # skip whitespace and comments
            (r'\s+', Text),
            (r'"(""|[^"])*"', Comment),
        ],
        'objects': [
            (r'\[', Text, 'blockvariables'),
            (r'\]', Text, 'afterobject'),
            (r'\b(self|super|true|false|nil|thisContext)\b',
             Name.Builtin.Pseudo, 'afterobject'),
            (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
            (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
            (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
             String.Symbol, 'afterobject'),
            include('literals'),
        ],
        # state entered after a receiver has been lexed: message sends
        'afterobject': [
            (r'! !$', Keyword, '#pop'),  # squeak chunk delimiter
            include('whitespaces'),
            (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
             Name.Builtin, '#pop'),
            (r'\b(new\b(?!:))', Name.Builtin),
            (r'\:=|\_', Operator, '#pop'),
            (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
            (r'\b[a-zA-Z]+\w*', Name.Function),
            (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
            (r'\.', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'[\])}]', Text),
            (r'[\[({]', Text, '#pop'),
        ],
        'squeak fileout': [
            # Squeak fileout format (optional)
            (r'^"(""|[^"])*"!', Keyword),
            (r"^'(''|[^'])*'!", Keyword),
            (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
             bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
            (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
             bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
            (r'^(\w+)( subclass: )(#\w+)'
             r'(\s+instanceVariableNames: )(.*?)'
             r'(\s+classVariableNames: )(.*?)'
             r'(\s+poolDictionaries: )(.*?)'
             r'(\s+category: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
                      String, Keyword, String, Keyword, String, Keyword)),
            (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String, Keyword)),
            (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
            (r'! !$', Keyword),
        ],
    }
+
+
class NewspeakLexer(RegexLexer):
    """
    For Newspeak syntax.

    .. versionadded:: 1.1
    """
    name = 'Newspeak'
    url = 'http://newspeaklanguage.org/'
    filenames = ['*.ns2']
    aliases = ['newspeak', ]
    mimetypes = ['text/x-newspeak']

    tokens = {
        'root': [
            (r'\b(Newsqueak2)\b', Keyword.Declaration),
            (r"'[^']*'", String),
            (r'\b(class)(\s+)(\w+)(\s*)',
             bygroups(Keyword.Declaration, Text, Name.Class, Text)),
            (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
             Keyword),
            # keyword-message selector followed by an argument name
            (r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            # slot definition: name = value
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            (r'<\w+>', Comment.Special),
            include('expressionstat'),
            include('whitespace')
        ],

        'expressionstat': [
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'\d+', Number.Integer),
            (r':\w+', Name.Variable),
            (r'(\w+)(::)', bygroups(Name.Variable, Operator)),
            (r'\w+:', Name.Function),
            (r'\w+', Name.Variable),
            (r'\(|\)', Punctuation),
            (r'\[|\]', Punctuation),
            (r'\{|\}', Punctuation),

            (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
            (r'\.|;', Punctuation),
            include('whitespace'),
            include('literals'),
        ],
        'literals': [
            (r'\$.', String),
            (r"'[^']*'", String),
            (r"#'[^']*'", String.Symbol),
            (r"#\w+:?", String.Symbol),
            (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
        ],
        'whitespace': [
            (r'\s+', Text),
            # double-quoted text is a comment in Newspeak/Smalltalk style
            (r'"[^"]*"', Comment)
        ],
    }
diff --git a/pygments/lexers/smithy.py b/pygments/lexers/smithy.py
new file mode 100644
index 0000000..69b576e
--- /dev/null
+++ b/pygments/lexers/smithy.py
@@ -0,0 +1,78 @@
+"""
+ pygments.lexers.smithy
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Smithy IDL.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Keyword, Name, String, \
+ Number, Whitespace, Punctuation
+
+__all__ = ['SmithyLexer']
+
+
class SmithyLexer(RegexLexer):
    """
    For Smithy IDL

    .. versionadded:: 2.10
    """
    name = 'Smithy'
    url = 'https://awslabs.github.io/smithy/'
    filenames = ['*.smithy']
    aliases = ['smithy']

    # NOTE(review): `unquoted` is identical to `identifier` and does not
    # appear to be used below — confirm whether it can be dropped.
    unquoted = r'[A-Za-z0-9_\.#$-]+'
    identifier = r"[A-Za-z0-9_\.#$-]+"

    simple_shapes = (
        'use', 'byte', 'short', 'integer', 'long', 'float', 'document',
        'double', 'bigInteger', 'bigDecimal', 'boolean', 'blob', 'string',
        'timestamp',
    )

    aggregate_shapes = (
        'apply', 'list', 'map', 'set', 'structure', 'union', 'resource',
        'operation', 'service', 'trait'
    )

    tokens = {
        'root': [
            # documentation comments (///) before plain comments (//)
            (r'///.*$', Comment.Multiline),
            (r'//.*$', Comment),
            (r'@[0-9a-zA-Z\.#-]*', Name.Decorator),
            (r'(=)', Name.Decorator),
            (r'^(\$version)(:)(.+)',
             bygroups(Keyword.Declaration, Name.Decorator, Name.Class)),
            (r'^(namespace)(\s+' + identifier + r')\b',
             bygroups(Keyword.Declaration, Name.Class)),
            (words(simple_shapes,
                   prefix=r'^', suffix=r'(\s+' + identifier + r')\b'),
             bygroups(Keyword.Declaration, Name.Class)),
            (words(aggregate_shapes,
                   prefix=r'^', suffix=r'(\s+' + identifier + r')'),
             bygroups(Keyword.Declaration, Name.Class)),
            (r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class,
                      Whitespace, Name.Decorator)),
            (r"(true|false|null)", Keyword.Constant),
            (r"(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)", Number),
            (identifier + ":", Name.Label),
            (identifier, Name.Variable.Class),
            # brackets re-enter 'root' so nesting depth is tracked
            (r'\[', Text, "#push"),
            (r'\]', Text, "#pop"),
            (r'\(', Text, "#push"),
            (r'\)', Text, "#pop"),
            (r'\{', Text, "#push"),
            (r'\}', Text, "#pop"),
            (r'"{3}(\\\\|\n|\\")*"{3}', String.Doc),
            (r'"(\\\\|\n|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\n|\\'|[^'])*'", String.Single),
            (r'[:,]+', Punctuation),
            (r'\s+', Whitespace),
        ]
    }
diff --git a/pygments/lexers/smv.py b/pygments/lexers/smv.py
new file mode 100644
index 0000000..b51e170
--- /dev/null
+++ b/pygments/lexers/smv.py
@@ -0,0 +1,78 @@
+"""
+ pygments.lexers.smv
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the SMV languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, Text
+
+__all__ = ['NuSMVLexer']
+
+
class NuSMVLexer(RegexLexer):
    """
    Lexer for the NuSMV language.

    .. versionadded:: 2.2
    """

    name = 'NuSMV'
    aliases = ['nusmv']
    filenames = ['*.smv']
    mimetypes = []

    tokens = {
        'root': [
            # Comments (block /-- ... --/ and line --)
            (r'(?s)\/\-\-.*?\-\-/', Comment),
            (r'--.*\n', Comment),

            # Reserved
            (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
                    'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
                    'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
                    'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
                    'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
                    'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
                    'PREDICATES'), suffix=r'(?![\w$#-])'),
             Keyword.Declaration),
            (r'process(?![\w$#-])', Keyword),
            (words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
                   suffix=r'(?![\w$#-])'), Keyword.Type),
            (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
            (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
                    'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
                    'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
             Name.Builtin),
            # temporal/word operators
            (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
                    'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
                    'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
                    'xnor'), suffix=r'(?![\w$#-])'),
             Operator.Word),
            (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),

            # Names
            (r'[a-zA-Z_][\w$#-]*', Name.Variable),

            # Operators
            (r':=', Operator),
            (r'[-&|+*/<>!=]', Operator),

            # Literals (sized word constants like 0ub8_01)
            (r'\-?\d+\b', Number.Integer),
            (r'0[su][bB]\d*_[01_]+', Number.Bin),
            (r'0[su][oO]\d*_[0-7_]+', Number.Oct),
            (r'0[su][dD]\d*_[\d_]+', Number.Decimal),
            (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),

            # Whitespace, punctuation and the rest
            (r'\s+', Text.Whitespace),
            (r'[()\[\]{};?:.,]', Punctuation),
        ],
    }
diff --git a/pygments/lexers/snobol.py b/pygments/lexers/snobol.py
new file mode 100644
index 0000000..c336d19
--- /dev/null
+++ b/pygments/lexers/snobol.py
@@ -0,0 +1,82 @@
+"""
+ pygments.lexers.snobol
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the SNOBOL language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['SnobolLexer']
+
+
class SnobolLexer(RegexLexer):
    """
    Lexer for the SNOBOL4 programming language.

    Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
    Does not require spaces around binary operators.

    .. versionadded:: 1.5
    """

    name = "Snobol"
    aliases = ["snobol"]
    filenames = ['*.snobol']
    mimetypes = ['text/x-snobol']

    tokens = {
        # root state, start of line
        # comments, continuation lines, and directives start in column 1
        # as do labels
        'root': [
            (r'\*.*\n', Comment),
            (r'[+.] ', Punctuation, 'statement'),
            (r'-.*\n', Comment),
            (r'END\s*\n', Name.Label, 'heredoc'),
            (r'[A-Za-z$][\w$]*', Name.Label, 'statement'),
            (r'\s+', Text, 'statement'),
        ],
        # statement state, line after continuation or label
        'statement': [
            (r'\s*\n', Text, '#pop'),
            (r'\s+', Text),
            (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|'
             r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|'
             r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|'
             r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])',
             Name.Builtin),
            (r'[A-Za-z][\w.]*', Name),
            # ASCII equivalents of original operators
            # | for the EBCDIC equivalent, ! likewise
            # \ for EBCDIC negation
            (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator),
            (r'"[^"]*"', String),
            (r"'[^']*'", String),
            # Accept SPITBOL syntax for real numbers
            # as well as Macro SNOBOL4
            (r'[0-9]+(?=[^.EeDd])', Number.Integer),
            (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float),
            # Goto
            (r':', Punctuation, 'goto'),
            (r'[()<>,;]', Punctuation),
        ],
        # Goto block: e.g.  :S(LABEL) F(OTHER)
        'goto': [
            # pop both 'goto' and 'statement' at end of line
            (r'\s*\n', Text, "#pop:2"),
            (r'\s+', Text),
            (r'F|S', Keyword),
            (r'(\()([A-Za-z][\w.]*)(\))',
             bygroups(Punctuation, Name.Label, Punctuation))
        ],
        # everything after the END statement is basically one
        # big heredoc.
        'heredoc': [
            (r'.*\n', String.Heredoc)
        ]
    }
diff --git a/pygments/lexers/solidity.py b/pygments/lexers/solidity.py
new file mode 100644
index 0000000..46a3b9e
--- /dev/null
+++ b/pygments/lexers/solidity.py
@@ -0,0 +1,87 @@
+"""
+ pygments.lexers.solidity
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Solidity.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['SolidityLexer']
+
+
class SolidityLexer(RegexLexer):
    """
    For Solidity source code.

    .. versionadded:: 2.5
    """

    name = 'Solidity'
    aliases = ['solidity']
    filenames = ['*.sol']
    mimetypes = []

    # primitive types, with optional bit-width suffixes (8..256)
    datatype = (
        r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64'
        r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208'
        r'|216|224|232|240|248|256)?))\b'
    )

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (r'\bpragma\s+solidity\b', Keyword, 'pragma'),
            (r'\b(contract)(\s+)([a-zA-Z_]\w*)',
             bygroups(Keyword, Whitespace, Name.Entity)),
            # typed declaration with optional visibility modifier
            (datatype + r'(\s+)((?:external|public|internal|private)\s+)?' +
             r'([a-zA-Z_]\w*)',
             bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)),
            (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)',
             bygroups(Keyword.Type, Whitespace, Name.Variable)),
            (r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword),
            (words((
                'block', 'break', 'constant', 'constructor', 'continue',
                'contract', 'do', 'else', 'external', 'false', 'for',
                'function', 'if', 'import', 'inherited', 'internal', 'is',
                'library', 'mapping', 'memory', 'modifier', 'msg', 'new',
                'payable', 'private', 'public', 'require', 'return',
                'returns', 'struct', 'suicide', 'throw', 'this', 'true',
                'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'),
             Keyword.Type),
            (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin),
            (datatype, Keyword.Type),
            include('constants'),
            (r'[a-zA-Z_]\w*', Text),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[.;{}(),\[\]]', Punctuation)
        ],
        'comments': [
            (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
            # unterminated block comment runs to end of input
            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline)
        ],
        'constants': [
            (r'("(\\"|.)*?")', String.Double),
            (r"('(\\'|.)*?')", String.Single),
            (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex),
            (r'\b\d+\b', Number.Decimal),
        ],
        # version constraint after `pragma solidity`, e.g. ^0.4.20;
        'pragma': [
            include('whitespace'),
            include('comments'),
            (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)',
             bygroups(Operator, Whitespace, Keyword)),
            (r';', Punctuation, '#pop')
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            (r'\n', Whitespace)
        ]
    }
diff --git a/pygments/lexers/sophia.py b/pygments/lexers/sophia.py
new file mode 100644
index 0000000..fc90181
--- /dev/null
+++ b/pygments/lexers/sophia.py
@@ -0,0 +1,103 @@
+"""
+ pygments.lexers.sophia
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Sophia.
+
+ Derived from pygments/lexers/reason.py.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, default, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text
+
+__all__ = ['SophiaLexer']
+
class SophiaLexer(RegexLexer):
    """
    A Sophia lexer.

    .. versionadded:: 2.11
    """

    name = 'Sophia'
    aliases = ['sophia']
    filenames = ['*.aes']
    mimetypes = []

    keywords = (
        'contract', 'include', 'let', 'switch', 'type', 'record', 'datatype',
        'if', 'elif', 'else', 'function', 'stateful', 'payable', 'public',
        'entrypoint', 'private', 'indexed', 'namespace', 'interface', 'main',
        'using', 'as', 'for', 'hiding',
    )

    builtins = ('state', 'put', 'abort', 'require')

    word_operators = ('mod', 'band', 'bor', 'bxor', 'bnot')

    primitive_types = ('int', 'address', 'bool', 'bits', 'bytes', 'string',
                       'list', 'option', 'char', 'unit', 'map', 'event',
                       'hash', 'signature', 'oracle', 'oracle_query')

    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbr]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],
        'root': [
            (r'\s+', Text.Whitespace),
            (r'(true|false)\b', Keyword.Constant),
            # qualified name: Module.member
            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Class, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name.Function),
            (r'//.*?\n', Comment.Single),
            (r'\/\*(?!/)', Comment.Multiline, 'comment'),

            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            # byte-array literal, e.g. #fedcba
            (r'#[\da-fA-F][\da-fA-F_]*', Name.Label),
            (r'\d[\d_]*', Number.Integer),

            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, prefix=r'\b', suffix=r'\b'), Operator.Word),
            (words(primitive_types, prefix=r'\b', suffix=r'\b'), Keyword.Type),

            (r'[=!<>+\\*/:&|?~@^-]', Operator.Word),
            (r'[.;:{}(),\[\]]', Punctuation),

            # chain-entity literals (account/oracle/query/contract addresses)
            (r"(ak_|ok_|oq_|ct_)[\w']*", Name.Label),
            (r"[^\W\d][\w']*", Name),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
             String.Char),
            (r"'.'", String.Char),
            # type variable, e.g. 'a
            (r"'[a-z][\w]*", Name.Variable),

            (r'"', String.Double, 'string')
        ],
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            # nested block comments are supported
            (r'\/\*', Comment.Multiline, '#push'),
            (r'\*\/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ],
        'string': [
            (r'[^\\"]+', String.Double),
            include('escape-sequence'),
            (r'\\\n', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        # remainder of a qualified Module.member reference
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Function),
            (r'[A-Z][\w\']*', Name.Function, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            default('#pop'),
        ],
    }
+
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py
new file mode 100644
index 0000000..6db68a5
--- /dev/null
+++ b/pygments/lexers/special.py
@@ -0,0 +1,116 @@
+"""
+ pygments.lexers.special
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Special lexers.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import ast
+
+from pygments.lexer import Lexer, line_re
+from pygments.token import Token, Error, Text, Generic
+from pygments.util import get_choice_opt
+
+
+__all__ = ['TextLexer', 'OutputLexer', 'RawTokenLexer']
+
+
class TextLexer(Lexer):
    """
    Fallback "null" lexer: emits the entire input as one ``Token.Text``
    token and applies no highlighting at all.
    """
    name = 'Text only'
    aliases = ['text']
    filenames = ['*.txt']
    mimetypes = ['text/plain']
    # Small non-zero priority so guessing can always fall back to it.
    priority = 0.01

    def get_tokens_unprocessed(self, text):
        # A single token covering the whole document, at offset 0.
        return iter([(0, Text, text)])

    def analyse_text(text):
        # Always claims the input, but only at its own tiny priority.
        return TextLexer.priority
+
+
class OutputLexer(Lexer):
    """
    Trivial lexer that marks the entire input as ``Token.Generic.Output``
    (useful for program output in console transcripts).

    .. versionadded:: 2.10
    """
    name = 'Text output'
    aliases = ['output']

    def get_tokens_unprocessed(self, text):
        # One token covering the whole text, starting at offset 0.
        return iter([(0, Generic.Output, text)])
+
+
_ttype_cache = {}  # maps token-type strings (e.g. "Token.Name") to Token objects


class RawTokenLexer(Lexer):
    """
    Recreate a token stream formatted with the `RawTokenFormatter`.

    Additional options accepted:

    `compress`
        If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
        the given compression algorithm before lexing (default: ``""``).
    """
    name = 'Raw token data'
    aliases = []
    filenames = []
    mimetypes = ['application/x-pygments-tokens']

    def __init__(self, **options):
        # 'none' is accepted as an alias for '' (no decompression).
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        Lexer.__init__(self, **options)

    def get_tokens(self, text):
        if self.compress:
            if isinstance(text, str):
                text = text.encode('latin1')
            try:
                if self.compress == 'gz':
                    import gzip
                    text = gzip.decompress(text)
                elif self.compress == 'bz2':
                    import bz2
                    text = bz2.decompress(text)
            except OSError:
                # NOTE(review): malformed compressed data can also raise
                # zlib.error / ValueError, which are not caught here —
                # confirm whether that is intentional.
                yield Error, text.decode('latin1')
        if isinstance(text, bytes):
            text = text.decode('latin1')

        # do not call Lexer.get_tokens() because stripping is not optional.
        text = text.strip('\n') + '\n'
        for i, t, v in self.get_tokens_unprocessed(text):
            yield t, v

    def get_tokens_unprocessed(self, text):
        length = 0
        for match in line_re.finditer(text):
            try:
                # Each input line is "<token type>\t<repr of value>".
                ttypestr, val = match.group().rstrip().split('\t', 1)
                ttype = _ttype_cache.get(ttypestr)
                if not ttype:
                    # Resolve e.g. "Token.Name.Function" attribute by
                    # attribute, validating each component on the way.
                    ttype = Token
                    ttypes = ttypestr.split('.')[1:]
                    for ttype_ in ttypes:
                        if not ttype_ or not ttype_[0].isupper():
                            raise ValueError('malformed token name')
                        ttype = getattr(ttype, ttype_)
                    _ttype_cache[ttypestr] = ttype
                # literal_eval safely parses the repr'd string value
                val = ast.literal_eval(val)
                if not isinstance(val, str):
                    raise ValueError('expected str')
            except (SyntaxError, ValueError):
                # Any malformed line is emitted verbatim as an Error token.
                val = match.group()
                ttype = Error
            yield length, ttype, val
            length += len(val)
diff --git a/pygments/lexers/spice.py b/pygments/lexers/spice.py
new file mode 100644
index 0000000..53f111b
--- /dev/null
+++ b/pygments/lexers/spice.py
@@ -0,0 +1,71 @@
+"""
+ pygments.lexers.spice
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Spice programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['SpiceLexer']
+
+
class SpiceLexer(RegexLexer):
    """
    For Spice source.

    .. versionadded:: 2.11
    """
    name = 'Spice'
    url = 'https://www.spicelang.com'
    filenames = ['*.spice']
    aliases = ['spice', 'spicelang']
    mimetypes = ['text/x-spice']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),
            # comments (doc comments /** ... */ before plain block comments)
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*]{2}(.|\n)*?[*](\\\n)?/', String.Doc),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # keywords
            (r'(import|as)\b', Keyword.Namespace),
            (r'(f|p|type|struct|enum)\b', Keyword.Declaration),
            (words(('if', 'else', 'for', 'foreach', 'while', 'break',
                    'continue', 'return', 'assert', 'thread', 'unsafe', 'ext',
                    'dll'), suffix=r'\b'), Keyword),
            (words(('const', 'signed', 'unsigned', 'inline', 'public'),
                   suffix=r'\b'), Keyword.Pseudo),
            (words(('new', 'switch', 'case', 'yield', 'stash', 'pick', 'sync',
                    'class'), suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|nil)\b', Keyword.Constant),
            (words(('double', 'int', 'short', 'long', 'byte', 'char', 'string',
                    'bool', 'dyn'), suffix=r'\b'), Keyword.Type),
            # builtin call followed by its opening parenthesis
            (words(('printf', 'sizeof', 'len', 'tid', 'join'), suffix=r'\b(\()'),
             bygroups(Name.Builtin, Punctuation)),
            # numeric literals (optional s/l size suffix)
            (r'[0-9]*[.][0-9]+', Number.Double),
            (r'0[bB][01]+[sl]?', Number.Bin),
            (r'0[oO][0-7]+[sl]?', Number.Oct),
            (r'0[xXhH][0-9a-fA-F]+[sl]?', Number.Hex),
            (r'(0[dD])?[0-9]+[sl]?', Number.Integer),
            # string literal
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # char literal
            (r'\'(\\\\|\\[^\\]|[^\'\\])\'', String.Char),
            # operator tokens, longest forms first
            (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||'
             r'\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|[+\-*/&]', Operator),
            (r'[|<>=!()\[\]{}.,;:\?]', Punctuation),
            # identifiers
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
new file mode 100644
index 0000000..043a3d1
--- /dev/null
+++ b/pygments/lexers/sql.py
@@ -0,0 +1,838 @@
+"""
+ pygments.lexers.sql
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various SQL dialects and related interactive sessions.
+
+ Postgres specific lexers:
+
+ `PostgresLexer`
+ A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
+ lexer are:
+
+ - keywords and data types list parsed from the PG docs (run the
+ `_postgres_builtins` module to update them);
+ - Content of $-strings parsed using a specific lexer, e.g. the content
+ of a PL/Python function is parsed using the Python lexer;
+ - parse PG specific constructs: E-strings, $-strings, U&-strings,
+ different operators and punctuation.
+
+ `PlPgsqlLexer`
        A lexer for the PL/pgSQL language. Adds a few specific constructs on
        top of the PG SQL lexer (such as <<label>>).
+
+ `PostgresConsoleLexer`
+ A lexer to highlight an interactive psql session:
+
+ - identifies the prompt and does its best to detect the end of command
+ in multiline statement where not all the lines are prefixed by a
+ prompt, telling them apart from the output;
+ - highlights errors in the output and notification levels;
+ - handles psql backslash commands.
+
+ The ``tests/examplefiles`` contains a few test files with data to be
+ parsed by these lexers.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words
+from pygments.token import Punctuation, Whitespace, Text, Comment, Operator, \
+ Keyword, Name, String, Number, Generic, Literal
+from pygments.lexers import get_lexer_by_name, ClassNotFound
+
+from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
+ PSEUDO_TYPES, PLPGSQL_KEYWORDS
+from pygments.lexers._mysql_builtins import \
+ MYSQL_CONSTANTS, \
+ MYSQL_DATATYPES, \
+ MYSQL_FUNCTIONS, \
+ MYSQL_KEYWORDS, \
+ MYSQL_OPTIMIZER_HINTS
+
+from pygments.lexers import _tsql_builtins
+
+
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
           'SqlLexer', 'TransactSqlLexer', 'MySqlLexer',
           'SqliteConsoleLexer', 'RqlLexer']

line_re = re.compile('.*?\n')  # one line at a time, newline included
sqlite_prompt_re = re.compile(r'^(?:sqlite| ...)>(?= )')

# LANGUAGE clause of CREATE FUNCTION, used to choose a sub-lexer for
# the body of $-quoted strings (see language_callback below).
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)

do_re = re.compile(r'\bDO\b', re.IGNORECASE)

# Regular expressions for analyse_text()
name_between_bracket_re = re.compile(r'\[[a-zA-Z_]\w*\]')
name_between_backtick_re = re.compile(r'`[a-zA-Z_]\w*`')
tsql_go_re = re.compile(r'\bgo\b', re.IGNORECASE)
tsql_declare_re = re.compile(r'\bdeclare\s+@', re.IGNORECASE)
tsql_variable_re = re.compile(r'@[a-zA-Z_]\w*\b')
+
+
def language_callback(lexer, match):
    """Parse the content of a $-string using a lexer

    The lexer is chosen looking for a nearby LANGUAGE or assumed as
    plpgsql if inside a DO statement and no LANGUAGE has been found.
    """
    lx = None
    # 1. Look ahead (up to 100 chars) for a LANGUAGE clause right after
    #    the $-string, as in CREATE FUNCTION ... $$...$$ LANGUAGE plpython.
    m = language_re.match(lexer.text[match.end():match.end()+100])
    if m is not None:
        lx = lexer._get_lexer(m.group(1))
    else:
        # 2. Otherwise take the closest LANGUAGE clause *before* the string.
        m = list(language_re.finditer(
            lexer.text[max(0, match.start()-100):match.start()]))
        if m:
            lx = lexer._get_lexer(m[-1].group(1))
        else:
            # 3. A $-string shortly after a DO keyword defaults to PL/pgSQL.
            m = list(do_re.finditer(
                lexer.text[max(0, match.start()-25):match.start()]))
            if m:
                lx = lexer._get_lexer('plpgsql')

    # 1 = $, 2 = delimiter, 3 = $
    yield (match.start(1), String, match.group(1))
    yield (match.start(2), String.Delimiter, match.group(2))
    yield (match.start(3), String, match.group(3))
    # 4 = string contents, re-lexed with the chosen language if any
    if lx:
        yield from lx.get_tokens_unprocessed(match.group(4))
    else:
        yield (match.start(4), String, match.group(4))
    # 5 = $, 6 = delimiter, 7 = $
    yield (match.start(5), String, match.group(5))
    yield (match.start(6), String.Delimiter, match.group(6))
    yield (match.start(7), String, match.group(7))
+
+
class PostgresBase:
    """Base class for Postgres-related lexers.

    Implemented as a mixin so that the Lexer metaclass does not kick in:
    if the lexers shared a common Lexer ancestor, ``_tokens`` could be
    built on that ancestor and never refreshed for the subclasses,
    resulting e.g. in PL/pgSQL being parsed as plain SQL.  This
    shortcoming seems to suggest that regexp lexers are not really
    subclassable.
    """

    def get_tokens_unprocessed(self, text, *args):
        # Keep the whole input around: language_callback needs to peek
        # before and after the current match.
        self.text = text
        yield from super().get_tokens_unprocessed(text, *args)

    def _get_lexer(self, lang):
        # Plain "sql" inside Postgres means the Postgres dialect itself.
        if lang.lower() == 'sql':
            return get_lexer_by_name('postgresql', **self.options)

        # Try the literal name first, then progressively strip the "pl"
        # prefix and/or the "u" (untrusted) suffix, e.g. plpythonu ->
        # pythonu -> plpython -> python.
        starts_pl = lang.startswith('pl')
        ends_u = lang.endswith('u')
        candidates = [lang]
        if starts_pl:
            candidates.append(lang[2:])
        if ends_u:
            candidates.append(lang[:-1])
        if starts_pl and ends_u:
            candidates.append(lang[2:-1])

        for candidate in candidates:
            try:
                return get_lexer_by_name(candidate, **self.options)
            except ClassNotFound:
                continue

        # TODO: better logging
        # print >>sys.stderr, "language not found:", lang
        return None
+
+
class PostgresLexer(PostgresBase, RegexLexer):
    """
    Lexer for the PostgreSQL dialect of SQL.

    .. versionadded:: 1.5
    """

    name = 'PostgreSQL SQL dialect'
    aliases = ['postgresql', 'postgres']
    mimetypes = ['text/x-postgresql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'--.*\n?', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # data types may contain spaces, e.g. "double precision"
            (r'(' + '|'.join(s.replace(" ", r"\s+")
                             for s in DATATYPES + PSEUDO_TYPES) + r')\b',
             Name.Builtin),
            (words(KEYWORDS, suffix=r'\b'), Keyword),
            (r'[+*/<>=~!@#%^&|`?-]+', Operator),
            (r'::', Operator),  # cast
            (r'\$\d+', Name.Variable),  # positional parameter, e.g. $1
            (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
            (r'[0-9]+', Number.Integer),
            # E'...' escape strings and U&'...' Unicode strings
            (r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'),
            # quoted identifier
            (r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'),
            # $tag$...$tag$ dollar-quoted string; body is re-lexed by
            # language_callback when a LANGUAGE clause is nearby
            (r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback),
            (r'[a-z_]\w*', Name),

            # psql variable in SQL
            (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable),

            (r'[;:()\[\]{},.]', Punctuation),
        ],
        'multiline-comments': [
            # Postgres comments nest, hence the recursive state push
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ],
        'string': [
            (r"[^']+", String.Single),
            (r"''", String.Single),  # doubled quote = escaped quote
            (r"'", String.Single, '#pop'),
        ],
        'quoted-ident': [
            (r'[^"]+', String.Name),
            (r'""', String.Name),  # doubled quote = escaped quote
            (r'"', String.Name, '#pop'),
        ],
    }
+
+
class PlPgsqlLexer(PostgresBase, RegexLexer):
    """
    Handle the extra syntax in Pl/pgSQL language.

    .. versionadded:: 1.5
    """
    name = 'PL/pgSQL'
    aliases = ['plpgsql']
    mimetypes = ['text/x-plpgsql']

    flags = re.IGNORECASE
    # Copy the PostgresLexer table; the inner lists are sliced so the
    # in-place edits below do not leak back into PostgresLexer.tokens.
    tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()}

    # extend the keywords list: find the plain-Keyword rule and swap in
    # one that also covers the PL/pgSQL keywords
    for i, pattern in enumerate(tokens['root']):
        if pattern[1] == Keyword:
            tokens['root'][i] = (
                words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'),
                Keyword)
            del i  # keep the class namespace clean
            break
    else:
        assert 0, "SQL keywords not found"

    # Add specific PL/pgSQL rules (before the SQL ones)
    tokens['root'][:0] = [
        (r'\%[a-z]\w*\b', Name.Builtin),  # actually, a datatype
        (r':=', Operator),
        (r'\<\<[a-z]\w*\>\>', Name.Label),
        (r'\#[a-z]\w*\b', Keyword.Pseudo),  # #variable_conflict
    ]
+
+
class PsqlRegexLexer(PostgresBase, RegexLexer):
    """
    Extend the PostgresLexer adding support specific for psql commands.

    This is not a complete psql lexer yet as it lacks prompt support
    and output rendering.
    """

    name = 'PostgreSQL console - regexp based lexer'
    aliases = []    # not public

    flags = re.IGNORECASE
    # Copy the PostgresLexer table (inner lists sliced to avoid aliasing).
    tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()}

    # backslash meta-command, e.g. \d or \copy
    tokens['root'].append(
        (r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
    tokens['psql-command'] = [
        # NOTE(review): this pushes a fresh 'root' state instead of
        # popping back ('#pop'), so the state stack grows by one per
        # command; rendering is unaffected -- confirm intended.
        (r'\n', Text, 'root'),
        (r'\s+', Whitespace),
        (r'\\[^\s]+', Keyword.Pseudo),
        # psql variable reference, optionally quoted: :var, :'var', :"var"
        (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable),
        (r"'(''|[^'])*'", String.Single),
        (r"`([^`])*`", String.Backtick),
        (r"[^\s]+", String.Symbol),
    ]
+
+
# Regexes used by PostgresConsoleLexer to carve a psql session into
# prompts, commands, and output/notice lines.
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
re_end_command = re.compile(r';\s*(--.*?)?$')
# NOTE: re_psql_command used to be assigned twice; the first pattern
# (r'\s*\\') was dead code, immediately shadowed by this one.
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
re_error = re.compile(r'(ERROR|FATAL):')
re_message = re.compile(
    r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
    r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
+
+
class lookahead:
    """Wrap an iterator and allow pushing back an item."""

    def __init__(self, iterable):
        self.iter = iter(iterable)
        self._nextitem = None

    def __iter__(self):
        return self

    def send(self, item):
        # Stash *item*; the next call to __next__ will yield it first.
        # (Pushing back None is not supported -- it reads as "empty".)
        self._nextitem = item
        return item

    def __next__(self):
        pushed, self._nextitem = self._nextitem, None
        if pushed is not None:
            return pushed
        return next(self.iter)

    next = __next__
+
+
class PostgresConsoleLexer(Lexer):
    """
    Lexer for psql sessions.

    .. versionadded:: 1.5
    """

    name = 'PostgreSQL console (psql)'
    aliases = ['psql', 'postgresql-console', 'postgres-console']
    mimetypes = ['text/x-postgresql-psql']

    def get_tokens_unprocessed(self, data):
        sql = PsqlRegexLexer(**self.options)

        # lookahead lets the output loop push a prompt line back for the
        # command loop of the next iteration
        lines = lookahead(line_re.findall(data))

        # prompt-output cycle
        while 1:

            # consume the lines of the command: start with an optional prompt
            # and continue until the end of command is detected
            curcode = ''
            insertions = []
            for line in lines:
                # Identify a shell prompt in case of psql commandline example
                if line.startswith('$') and not curcode:
                    lexer = get_lexer_by_name('console', **self.options)
                    yield from lexer.get_tokens_unprocessed(line)
                    break

                # Identify a psql prompt
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # record where the prompt token must be spliced into
                    # the SQL token stream (offset = current code length)
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, mprompt.group())]))
                    curcode += line[len(mprompt.group()):]
                else:
                    # continuation line without a prompt
                    curcode += line

                # Check if this is the end of the command
                # TODO: better handle multiline comments at the end with
                # a lexer with an external state?
                if re_psql_command.match(curcode) \
                   or re_end_command.search(curcode):
                    break

            # Emit the combined stream of command and prompt(s)
            yield from do_insertions(insertions,
                                     sql.get_tokens_unprocessed(curcode))

            # Emit the output lines
            out_token = Generic.Output
            for line in lines:
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # push the line back to have it processed by the prompt
                    lines.send(line)
                    break

                mmsg = re_message.match(line)
                if mmsg is not None:
                    # ERROR/FATAL switch the remaining message text to
                    # error styling; other levels stay plain output
                    if mmsg.group(1).startswith("ERROR") \
                       or mmsg.group(1).startswith("FATAL"):
                        out_token = Generic.Error
                    yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
                    yield (mmsg.start(2), out_token, mmsg.group(2))
                else:
                    yield (0, out_token, line)
            else:
                # input exhausted with no new prompt: session is over
                return
+
+
class SqlLexer(RegexLexer):
    """
    Lexer for Structured Query Language. Currently, this lexer does
    not recognize any special syntax except ANSI SQL.
    """

    name = 'SQL'
    aliases = ['sql']
    filenames = ['*.sql']
    mimetypes = ['text/x-sql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'--.*\n?', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # FIX: 'TEMP' previously appeared as ' TEMP' (leading space),
            # which made the keyword unmatchable.
            # NOTE: a few historical misspellings ('INDITCATOR',
            # 'SQLWARNINIG', 'PRECEEDS') are kept as-is for compatibility.
            (words((
                'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER',
                'AGGREGATE', 'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE',
                'AND', 'ANY', 'ARE', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT',
                'ASYMMETRIC', 'AT', 'ATOMIC', 'AUTHORIZATION', 'AVG', 'BACKWARD',
                'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', 'BIT_LENGTH', 'BOTH', 'BREADTH',
                'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE',
                'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN',
                'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG',
                'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK',
                'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE',
                'CLUSTER', 'COALESCE', 'COBOL', 'COLLATE', 'COLLATION',
                'COLLATION_CATALOG', 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN',
                'COLUMN_NAME', 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT',
                'COMMIT', 'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT',
                'CONNECTION', 'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS',
                'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA',
                'CONSTRUCTOR', 'CONTAINS', 'CONTINUE', 'CONVERSION', 'CONVERT',
                'COPY', 'CORRESPONDING', 'COUNT', 'CREATE', 'CREATEDB', 'CREATEUSER',
                'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE', 'CURRENT_PATH',
                'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER',
                'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE',
                'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY',
                'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE',
                'DEFERRED', 'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS',
                'DEREF', 'DESC', 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR',
                'DETERMINISTIC', 'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH',
                'DISTINCT', 'DO', 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION',
                'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELSE', 'ELSIF', 'ENCODING',
                'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPTION',
                'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING',
                'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL',
                'FIRST', 'FOR', 'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE',
                'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET',
                'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', 'GROUP', 'GROUPING',
                'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', 'IF',
                'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMEDIATELY', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT',
                'IN', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX',
                'INHERITS', 'INITIALIZE', 'INITIALLY', 'INNER', 'INOUT', 'INPUT',
                'INSENSITIVE', 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INTERSECT', 'INTO',
                'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', 'KEY',
                'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST',
                'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT',
                'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION',
                'LOCATOR', 'LOCK', 'LOWER', 'MAP', 'MATCH', 'MAX', 'MAXVALUE',
                'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT', 'METHOD', 'MIN',
                'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', 'MODIFY', 'MONTH',
                'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NCLOB',
                'NEW', 'NEXT', 'NO', 'NOCREATEDB', 'NOCREATEUSER', 'NONE', 'NOT',
                'NOTHING', 'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT',
                'OCTET_LENGTH', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY',
                'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER',
                'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY',
                'OVERRIDING', 'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE',
                'PARAMETER_NAME', 'PARAMETER_ORDINAL_POSITION',
                'PARAMETER_SPECIFIC_CATALOG', 'PARAMETER_SPECIFIC_NAME',
                'PARAMETER_SPECIFIC_SCHEMA', 'PARTIAL', 'PASCAL', 'PENDANT', 'PERIOD', 'PLACING',
                'PLI', 'POSITION', 'POSTFIX', 'PRECEEDS', 'PRECISION', 'PREFIX', 'PREORDER',
                'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL',
                'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF',
                'REFERENCES', 'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME',
                'REPEATABLE', 'REPLACE', 'RESET', 'RESTART', 'RESTRICT', 'RESULT',
                'RETURN', 'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE',
                'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE',
                'ROUTINE_CATALOG', 'ROUTINE_NAME', 'ROUTINE_SCHEMA', 'ROW', 'ROWS',
                'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME',
                'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF',
                'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER',
                'SET', 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE',
                'SOME', 'SOURCE', 'SPACE', 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME',
                'SQL', 'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG',
                'STABLE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS', 'STDIN',
                'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE', 'SUBCLASS_ORIGIN',
                'SUBLIST', 'SUBSTRING', 'SUCCEEDS', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM',
                'SYSTEM_USER', 'TABLE', 'TABLE_NAME', 'TEMP', 'TEMPLATE', 'TEMPORARY',
                'TERMINATE', 'THAN', 'THEN', 'TIME', 'TIMESTAMP', 'TIMEZONE_HOUR',
                'TIMEZONE_MINUTE', 'TO', 'TOAST', 'TRAILING', 'TRANSACTION',
                'TRANSACTIONS_COMMITTED', 'TRANSACTIONS_ROLLED_BACK', 'TRANSACTION_ACTIVE',
                'TRANSFORM', 'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER',
                'TRIGGER_CATALOG', 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE',
                'TRUNCATE', 'TRUSTED', 'TYPE', 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED',
                'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNNAMED', 'UNNEST', 'UNTIL',
                'UPDATE', 'UPPER', 'USAGE', 'USER', 'USER_DEFINED_TYPE_CATALOG',
                'USER_DEFINED_TYPE_NAME', 'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM',
                'VALID', 'VALIDATOR', 'VALUES', 'VARIABLE', 'VERBOSE',
                'VERSION', 'VERSIONS', 'VERSIONING', 'VIEW',
                'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WITHOUT', 'WORK',
                'WRITE', 'YEAR', 'ZONE'), suffix=r'\b'),
             Keyword),
            (words((
                'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR',
                'CHARACTER', 'DATE', 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER',
                'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', 'SERIAL', 'SMALLINT',
                'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'), suffix=r'\b'),
             Name.Builtin),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'[0-9]+', Number.Integer),
            # TODO: Backslash escapes?
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Symbol),  # not a real string literal in ANSI SQL
            (r'[a-z_][\w$]*', Name),  # allow $s in strings for Oracle
            (r'[;:()\[\],.]', Punctuation)
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }

    def analyse_text(self, text):
        # Generic fallback dialect: deliberately claims no score so the
        # more specific dialect lexers (T-SQL, MySQL, ...) win.
        return
+
+
class TransactSqlLexer(RegexLexer):
    """
    Transact-SQL (T-SQL) is Microsoft's and Sybase's proprietary extension to
    SQL.

    The list of keywords includes ODBC and keywords reserved for future use..
    """

    name = 'Transact-SQL'
    aliases = ['tsql', 't-sql']
    filenames = ['*.sql']
    mimetypes = ['text/x-tsql']

    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'--.*?$\n?', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (words(_tsql_builtins.OPERATORS), Operator),
            (words(_tsql_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            (words(_tsql_builtins.TYPES, suffix=r'\b'), Name.Class),
            (words(_tsql_builtins.FUNCTIONS, suffix=r'\b'), Name.Function),
            # GOTO target, e.g. "goto cleanup"
            (r'(goto)(\s+)(\w+\b)', bygroups(Keyword, Whitespace, Name.Label)),
            (words(_tsql_builtins.KEYWORDS, suffix=r'\b'), Keyword),
            # bracket-quoted identifier, e.g. [My Table]
            (r'(\[)([^]]+)(\])', bygroups(Operator, Name, Operator)),
            (r'0x[0-9a-f]+', Number.Hex),
            # Float variant 1, for example: 1., 1.e2, 1.2e3
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
            # Float variant 2, for example: .1, .1e2
            (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
            # Float variant 3, for example: 123e45
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Symbol),
            (r'[;(),.]', Punctuation),
            # Below we use \w even for the first "real" character because
            # tokens starting with a digit have already been recognized
            # as Number above.
            (r'@@\w+', Name.Builtin),
            (r'@\w+', Name.Variable),
            (r'(\w+)(:)', bygroups(Name.Label, Punctuation)),
            (r'#?#?\w+', Name),  # names for temp tables and anything else
            (r'\?', Name.Variable.Magic),  # parameter for prepared statements
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }

    def analyse_text(text):
        # Heuristic score in [0, 1]; called by Pygments without `self`.
        rating = 0
        if tsql_declare_re.search(text):
            # Found T-SQL variable declaration.
            rating = 1.0
        else:
            name_between_backtick_count = len(
                name_between_backtick_re.findall(text))
            name_between_bracket_count = len(
                name_between_bracket_re.findall(text))
            # We need to check if there are any names using
            # backticks or brackets, as otherwise both are 0
            # and 0 >= 2 * 0, so we would always assume it's true
            dialect_name_count = name_between_backtick_count + name_between_bracket_count
            if dialect_name_count >= 1 and \
               name_between_bracket_count >= 2 * name_between_backtick_count:
                # Found at least twice as many [name] as `name`.
                rating += 0.5
            elif name_between_bracket_count > name_between_backtick_count:
                rating += 0.2
            elif name_between_bracket_count > 0:
                rating += 0.1
        if tsql_variable_re.search(text) is not None:
            rating += 0.1
        if tsql_go_re.search(text) is not None:
            rating += 0.1
        return rating
+
+
class MySqlLexer(RegexLexer):
    """The Oracle MySQL lexer.

    This lexer does not attempt to maintain strict compatibility with
    MariaDB syntax or keywords. Although MySQL and MariaDB's common code
    history suggests there may be significant overlap between the two,
    compatibility between the two is not a target for this lexer.
    """

    name = 'MySQL'
    aliases = ['mysql']
    mimetypes = ['text/x-mysql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Whitespace),

            # Comments
            (r'(?:#|--\s+).*', Comment.Single),
            (r'/\*\+', Comment.Special, 'optimizer-hints'),
            (r'/\*', Comment.Multiline, 'multiline-comment'),

            # Hexadecimal literals
            (r"x'([0-9a-f]{2})+'", Number.Hex),  # MySQL requires paired hex characters in this form.
            (r'0x[0-9a-f]+', Number.Hex),

            # Binary literals
            (r"b'[01]+'", Number.Bin),
            (r'0b[01]+', Number.Bin),

            # Numeric literals
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),  # Mandatory integer, optional fraction and exponent
            (r'[0-9]*\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),  # Mandatory fraction, optional integer and exponent
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),  # Exponents with integer significands are still floats
            (r'[0-9]+(?=[^0-9a-z$_\u0080-\uffff])', Number.Integer),  # Integers that are not in a schema object name

            # Date literals
            (r"\{\s*d\s*(?P<quote>['\"])\s*\d{2}(\d{2})?.?\d{2}.?\d{2}\s*(?P=quote)\s*\}",
             Literal.Date),

            # Time literals
            (r"\{\s*t\s*(?P<quote>['\"])\s*(?:\d+\s+)?\d{1,2}.?\d{1,2}.?\d{1,2}(\.\d*)?\s*(?P=quote)\s*\}",
             Literal.Date),

            # Timestamp literals
            (
                r"\{\s*ts\s*(?P<quote>['\"])\s*"
                r"\d{2}(?:\d{2})?.?\d{2}.?\d{2}"  # Date part
                r"\s+"  # Whitespace between date and time
                r"\d{1,2}.?\d{1,2}.?\d{1,2}(\.\d*)?"  # Time part
                r"\s*(?P=quote)\s*\}",
                Literal.Date
            ),

            # String literals
            (r"'", String.Single, 'single-quoted-string'),
            (r'"', String.Double, 'double-quoted-string'),

            # Variables
            (r'@@(?:global\.|persist\.|persist_only\.|session\.)?[a-z_]+', Name.Variable),
            (r'@[a-z0-9_$.]+', Name.Variable),
            (r"@'", Name.Variable, 'single-quoted-variable'),
            (r'@"', Name.Variable, 'double-quoted-variable'),
            (r"@`", Name.Variable, 'backtick-quoted-variable'),
            (r'\?', Name.Variable),  # For demonstrating prepared statements

            # Operators
            (r'[!%&*+/:<=>^|~-]+', Operator),

            # Exceptions; these words tokenize differently in different contexts.
            (r'\b(set)(?!\s*\()', Keyword),
            (r'\b(character)(\s+)(set)\b', bygroups(Keyword, Whitespace, Keyword)),
            # In all other known cases, "SET" is tokenized by MYSQL_DATATYPES.

            (words(MYSQL_CONSTANTS, prefix=r'\b', suffix=r'\b'), Name.Constant),
            (words(MYSQL_DATATYPES, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (words(MYSQL_KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(MYSQL_FUNCTIONS, prefix=r'\b', suffix=r'\b(\s*)(\()'),
             bygroups(Name.Function, Whitespace, Punctuation)),

            # Schema object names
            #
            # Note: Although the first regex supports unquoted all-numeric
            # identifiers, this will not be a problem in practice because
            # numeric literals have already been handled above.
            #
            ('[0-9a-z$_\u0080-\uffff]+', Name),
            (r'`', Name.Quoted, 'schema-object-name'),

            # Punctuation
            (r'[(),.;]', Punctuation),
        ],

        # Multiline comment substates
        # ---------------------------

        'optimizer-hints': [
            (r'[^*a-z]+', Comment.Special),
            (r'\*/', Comment.Special, '#pop'),
            (words(MYSQL_OPTIMIZER_HINTS, suffix=r'\b'), Comment.Preproc),
            ('[a-z]+', Comment.Special),
            (r'\*', Comment.Special),
        ],

        'multiline-comment': [
            (r'[^*]+', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ],

        # String substates
        # ----------------

        'single-quoted-string': [
            (r"[^'\\]+", String.Single),
            (r"''", String.Escape),  # doubled quote = escaped quote
            (r"""\\[0'"bnrtZ\\%_]""", String.Escape),
            (r"'", String.Single, '#pop'),
        ],

        'double-quoted-string': [
            (r'[^"\\]+', String.Double),
            (r'""', String.Escape),  # doubled quote = escaped quote
            (r"""\\[0'"bnrtZ\\%_]""", String.Escape),
            (r'"', String.Double, '#pop'),
        ],

        # Variable substates
        # ------------------

        'single-quoted-variable': [
            (r"[^']+", Name.Variable),
            (r"''", Name.Variable),
            (r"'", Name.Variable, '#pop'),
        ],

        'double-quoted-variable': [
            (r'[^"]+', Name.Variable),
            (r'""', Name.Variable),
            (r'"', Name.Variable, '#pop'),
        ],

        'backtick-quoted-variable': [
            (r'[^`]+', Name.Variable),
            (r'``', Name.Variable),
            (r'`', Name.Variable, '#pop'),
        ],

        # Schema object name substates
        # ----------------------------
        #
        # "Name.Quoted" and "Name.Quoted.Escape" are non-standard but
        # formatters will style them as "Name" by default but add
        # additional styles based on the token name. This gives users
        # flexibility to add custom styles as desired.
        #
        'schema-object-name': [
            (r'[^`]+', Name.Quoted),
            (r'``', Name.Quoted.Escape),
            (r'`', Name.Quoted, '#pop'),
        ],
    }

    def analyse_text(text):
        # Heuristic score in [0, 1]; called by Pygments without `self`.
        rating = 0
        name_between_backtick_count = len(
            name_between_backtick_re.findall(text))
        name_between_bracket_count = len(
            name_between_bracket_re.findall(text))
        # Same logic as above in the TSQL analysis
        dialect_name_count = name_between_backtick_count + name_between_bracket_count
        if dialect_name_count >= 1 and \
           name_between_backtick_count >= 2 * name_between_bracket_count:
            # Found at least twice as many `name` as [name].
            rating += 0.5
        elif name_between_backtick_count > name_between_bracket_count:
            rating += 0.2
        elif name_between_backtick_count > 0:
            rating += 0.1
        return rating
+
+
class SqliteConsoleLexer(Lexer):
    """
    Lexer for example sessions using sqlite3.

    .. versionadded:: 0.11
    """

    name = 'sqlite3con'
    aliases = ['sqlite3']
    filenames = ['*.sqlite3-console']
    mimetypes = ['text/x-sqlite3-console']

    def get_tokens_unprocessed(self, data):
        sql = SqlLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(data):
            line = match.group()
            prompt_match = sqlite_prompt_re.match(line)
            if prompt_match is not None:
                # prompt is 7 chars ("sqlite>" or "   ...>") + one space;
                # record it for splicing into the SQL token stream
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:7])]))
                insertions.append((len(curcode),
                                   [(7, Whitespace, ' ')]))
                curcode += line[8:]
            else:
                # non-prompt line ends the pending command: flush it first
                if curcode:
                    yield from do_insertions(insertions,
                                             sql.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                if line.startswith('SQL error: '):
                    yield (match.start(), Generic.Traceback, line)
                else:
                    yield (match.start(), Generic.Output, line)
        # flush a trailing command with no output after it
        if curcode:
            yield from do_insertions(insertions,
                                     sql.get_tokens_unprocessed(curcode))
+
+
class RqlLexer(RegexLexer):
    """
    Lexer for Relation Query Language.

    .. versionadded:: 2.0
    """
    name = 'RQL'
    url = 'http://www.logilab.org/project/rql'
    aliases = ['rql']
    filenames = ['*.rql']
    mimetypes = ['text/x-rql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(DELETE|SET|INSERT|UNION|DISTINCT|WITH|WHERE|BEING|OR'
             r'|AND|NOT|GROUPBY|HAVING|ORDERBY|ASC|DESC|LIMIT|OFFSET'
             r'|TODAY|NOW|TRUE|FALSE|NULL|EXISTS)\b', Keyword),
            (r'[+*/<>=%-]', Operator),
            (r'(Any|is|instance_of|CWEType|CWRelation)\b', Name.Builtin),
            (r'[0-9]+', Number.Integer),
            # RQL variables are upper-cased, optionally with a trailing "?"
            (r'[A-Z_]\w*\??', Name),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Single),
            (r'[;:()\[\],.]', Punctuation)
        ],
    }
diff --git a/pygments/lexers/srcinfo.py b/pygments/lexers/srcinfo.py
new file mode 100644
index 0000000..870527a
--- /dev/null
+++ b/pygments/lexers/srcinfo.py
@@ -0,0 +1,62 @@
+"""
+ pygments.lexers.srcinfo
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for .SRCINFO files used by Arch Linux Packages.
+
+ The description of the format can be found in the wiki:
+ https://wiki.archlinux.org/title/.SRCINFO
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Keyword, Name, Operator, Whitespace
+
+__all__ = ['SrcinfoLexer']
+
# Plain keys that may appear in a .SRCINFO file.
keywords = (
    'pkgbase', 'pkgname',
    'pkgver', 'pkgrel', 'epoch',
    'pkgdesc', 'url', 'install', 'changelog',
    'arch', 'groups', 'license', 'noextract', 'options', 'backup',
    'validpgpkeys',
)

# Keys that always carry an architecture suffix, e.g. ``source_x86_64``.
architecture_dependent_keywords = (
    'source', 'depends', 'checkdepends', 'makedepends', 'optdepends',
    'provides', 'conflicts', 'replaces',
    'md5sums', 'sha1sums', 'sha224sums', 'sha256sums', 'sha384sums',
    'sha512sums',
)


class SrcinfoLexer(RegexLexer):
    """Lexer for .SRCINFO files used by Arch Linux Packages.

    .. versionadded:: 2.11
    """

    name = 'Srcinfo'
    aliases = ['srcinfo']
    filenames = ['.SRCINFO']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*', Comment.Single),
            (words(keywords), Keyword, 'assignment'),
            (words(architecture_dependent_keywords, suffix=r'_\w+'),
             Keyword, 'assignment'),
            (r'\w+', Name.Variable, 'assignment'),
        ],
        'assignment': [
            (r' +', Whitespace),
            (r'=', Operator, 'value'),
        ],
        'value': [
            (r' +', Whitespace),
            (r'.*', Text, '#pop:2'),  # pop both 'value' and 'assignment'
        ],
    }
diff --git a/pygments/lexers/stata.py b/pygments/lexers/stata.py
new file mode 100644
index 0000000..4fbe773
--- /dev/null
+++ b/pygments/lexers/stata.py
@@ -0,0 +1,171 @@
+"""
+ pygments.lexers.stata
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Stata
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from pygments.lexer import RegexLexer, default, include, words
+from pygments.token import Comment, Keyword, Name, Number, \
+ String, Text, Operator
+
+from pygments.lexers._stata_builtins import builtins_base, builtins_functions
+
+__all__ = ['StataLexer']
+
+
+class StataLexer(RegexLexer):
+ """
+ For Stata do files.
+
+ .. versionadded:: 2.2
+ """
+ # Syntax based on
+ # - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
+ # - https://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
+ # - https://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim
+
+ name = 'Stata'
+ url = 'http://www.stata.com/'
+ aliases = ['stata', 'do']
+ filenames = ['*.do', '*.ado']
+ mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ include('comments'),
+ include('strings'),
+ include('macros'),
+ include('numbers'),
+ include('keywords'),
+ include('operators'),
+ include('format'),
+ (r'.', Text),
+ ],
+ # Comments are a complicated beast in Stata because they can be
+ # nested and there are a few corner cases with that. See:
+ # - github.com/kylebarron/language-stata/issues/90
+ # - statalist.org/forums/forum/general-stata-discussion/general/1448244
+ 'comments': [
+ (r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
+ (r'^\s*\*', Comment.Single, 'comments-star'),
+ (r'/\*', Comment.Multiline, 'comments-block'),
+ (r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
+ ],
+ 'comments-block': [
+ (r'/\*', Comment.Multiline, '#push'),
+ # this ends and restarts a comment block. but need to catch this so
+ # that it doesn't start _another_ level of comment blocks
+ (r'\*/\*', Comment.Multiline),
+ (r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
+ # Match anything else as a character inside the comment
+ (r'.', Comment.Multiline),
+ ],
+ 'comments-star': [
+ (r'///.*?\n', Comment.Single,
+ ('#pop', 'comments-triple-slash')),
+ (r'(^//|(?<=\s)//)(?!/)', Comment.Single,
+ ('#pop', 'comments-double-slash')),
+ (r'/\*', Comment.Multiline, 'comments-block'),
+ (r'.(?=\n)', Comment.Single, '#pop'),
+ (r'.', Comment.Single),
+ ],
+ 'comments-triple-slash': [
+ (r'\n', Comment.Special, '#pop'),
+ # A // breaks out of a comment for the rest of the line
+ (r'//.*?(?=\n)', Comment.Single, '#pop'),
+ (r'.', Comment.Special),
+ ],
+ 'comments-double-slash': [
+ (r'\n', Text, '#pop'),
+ (r'.', Comment.Single),
+ ],
+ # `"compound string"' and regular "string"; note the former are
+ # nested.
+ 'strings': [
+ (r'`"', String, 'string-compound'),
+ (r'(?<!`)"', String, 'string-regular'),
+ ],
+ 'string-compound': [
+ (r'`"', String, '#push'),
+ (r'"\'', String, '#pop'),
+ (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
+ include('macros'),
+ (r'.', String)
+ ],
+ 'string-regular': [
+ (r'(")(?!\')|(?=\n)', String, '#pop'),
+ (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
+ include('macros'),
+ (r'.', String)
+ ],
+ # A local is usually
+ # `\w{0,31}'
+ # `:extended macro'
+ # `=expression'
+ # `[rsen](results)'
+ # `(++--)scalar(++--)'
+ #
+ # However, there are all sorts of weird rules wrt edge
+ # cases. Instead of writing 27 exceptions, anything inside
+ # `' is a local.
+ #
+ # A global is more restricted, so we do follow rules. Note only
+ # locals explicitly enclosed ${} can be nested.
+ 'macros': [
+ (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'`', Name.Variable, 'macro-local'),
+ ],
+ 'macro-local': [
+ (r'`', Name.Variable, '#push'),
+ (r"'", Name.Variable, '#pop'),
+ (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'.', Name.Variable), # fallback
+ ],
+ 'macro-global-nested': [
+ (r'\$(\{|(?=[$`]))', Name.Variable.Global, '#push'),
+ (r'\}', Name.Variable.Global, '#pop'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'`', Name.Variable, 'macro-local'),
+ (r'\w', Name.Variable.Global), # fallback
+ default('#pop'),
+ ],
+ 'macro-global-name': [
+ (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
+ (r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
+ (r'`', Name.Variable, 'macro-local', '#pop'),
+ (r'\w{1,32}', Name.Variable.Global, '#pop'),
+ ],
+ # Built in functions and statements
+ 'keywords': [
+ (words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
+ Name.Function),
+ (words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
+ Keyword),
+ ],
+ # http://www.stata.com/help.cgi?operators
+ 'operators': [
+ (r'-|==|<=|>=|<|>|&|!=', Operator),
+ (r'\*|\+|\^|/|!|~|==|~=', Operator)
+ ],
+ # Stata numbers
+ 'numbers': [
+ # decimal number
+ (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
+ Number),
+ ],
+ # Stata formats
+ 'format': [
+ (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
+ (r'%(21x|16H|16L|8H|8L)', Name.Other),
+ (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
+ (r'%[-~]?\d{1,4}s', Name.Other),
+ ]
+ }
diff --git a/pygments/lexers/supercollider.py b/pygments/lexers/supercollider.py
new file mode 100644
index 0000000..7cf405f
--- /dev/null
+++ b/pygments/lexers/supercollider.py
@@ -0,0 +1,95 @@
+"""
+ pygments.lexers.supercollider
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for SuperCollider
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['SuperColliderLexer']
+
+
+class SuperColliderLexer(RegexLexer):
+ """
+ For SuperCollider source code.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'SuperCollider'
+ url = 'http://supercollider.github.io/'
+ aliases = ['supercollider', 'sc']
+ filenames = ['*.sc', '*.scd']
+ mimetypes = ['application/supercollider', 'text/supercollider']
+
+ flags = re.DOTALL | re.MULTILINE
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Text),
+ (r'<!--', Comment),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline)
+ ],
+ 'slashstartsregex': [
+ include('commentsandwhitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex, '#pop'),
+ (r'(?=/)', Text, ('#pop', 'badregex')),
+ default('#pop'),
+ ],
+ 'badregex': [
+ (r'\n', Text, '#pop')
+ ],
+ 'root': [
+ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+ include('commentsandwhitespace'),
+ (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+ (words((
+ 'for', 'in', 'while', 'do', 'break', 'return', 'continue',
+ 'switch', 'case', 'default', 'if', 'else', 'throw', 'try',
+ 'catch', 'finally', 'new', 'delete', 'typeof', 'instanceof',
+ 'void'), suffix=r'\b'),
+ Keyword, 'slashstartsregex'),
+ (words(('var', 'let', 'with', 'function', 'arg'), suffix=r'\b'),
+ Keyword.Declaration, 'slashstartsregex'),
+ (words((
+ '(abstract', 'boolean', 'byte', 'char', 'class', 'const',
+ 'debugger', 'double', 'enum', 'export', 'extends', 'final',
+ 'float', 'goto', 'implements', 'import', 'int', 'interface',
+ 'long', 'native', 'package', 'private', 'protected', 'public',
+ 'short', 'static', 'super', 'synchronized', 'throws',
+ 'transient', 'volatile'), suffix=r'\b'),
+ Keyword.Reserved),
+ (words(('true', 'false', 'nil', 'inf'), suffix=r'\b'), Keyword.Constant),
+ (words((
+ 'Array', 'Boolean', 'Date', 'Error', 'Function', 'Number',
+ 'Object', 'Packages', 'RegExp', 'String',
+ 'isFinite', 'isNaN', 'parseFloat', 'parseInt', 'super',
+ 'thisFunctionDef', 'thisFunction', 'thisMethod', 'thisProcess',
+ 'thisThread', 'this'), suffix=r'\b'),
+ Name.Builtin),
+ (r'[$a-zA-Z_]\w*', Name.Other),
+ (r'\\?[$a-zA-Z_]\w*', String.Symbol),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ ]
+ }
+
+ def analyse_text(text):
+ """We're searching for a common function and a unique keyword here."""
+ if 'SinOsc' in text or 'thisFunctionDef' in text:
+ return 0.1
diff --git a/pygments/lexers/tal.py b/pygments/lexers/tal.py
new file mode 100644
index 0000000..391d1da
--- /dev/null
+++ b/pygments/lexers/tal.py
@@ -0,0 +1,74 @@
+"""
+ pygments.lexers.tal
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Uxntal
+
+ .. versionadded:: 2.12
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Keyword, Name, String, Number, \
+ Punctuation, Whitespace, Literal
+
+__all__ = ['TalLexer']
+
+
+class TalLexer(RegexLexer):
+ """
+ For `Uxntal <https://wiki.xxiivv.com/site/uxntal.html>`_ source code.
+
+ .. versionadded:: 2.12
+ """
+
+ name = 'Tal'
+ aliases = ['tal', 'uxntal']
+ filenames = ['*.tal']
+ mimetypes = ['text/x-uxntal']
+
+ instructions = [
+ 'BRK', 'LIT', 'INC', 'POP', 'DUP', 'NIP', 'SWP', 'OVR', 'ROT',
+ 'EQU', 'NEQ', 'GTH', 'LTH', 'JMP', 'JCN', 'JSR', 'STH',
+ 'LDZ', 'STZ', 'LDR', 'STR', 'LDA', 'STA', 'DEI', 'DEO',
+ 'ADD', 'SUB', 'MUL', 'DIV', 'AND', 'ORA', 'EOR', 'SFT'
+ ]
+
+ tokens = {
+ # the comment delimiters must not be adjacent to non-space characters.
+ # this means ( foo ) is a valid comment but (foo) is not. this also
+ # applies to nested comments.
+ 'comment': [
+ (r'(?<!\S)\((?!\S)', Comment.Multiline, '#push'), # nested comments
+ (r'(?<!\S)\)(?!\S)', Comment.Multiline, '#pop'), # nested comments
+ (r'[^()]+', Comment.Multiline), # comments
+ (r'[()]+', Comment.Multiline), # comments
+ ],
+ 'root': [
+ (r'\s+', Whitespace), # spaces
+ (r'(?<!\S)\((?!\S)', Comment.Multiline, 'comment'), # comments
+ (words(instructions, prefix=r'(?<!\S)', suffix=r'2?k?r?(?!\S)'),
+ Keyword.Reserved), # instructions
+ (r'[][{}](?!\S)', Punctuation), # delimiters
+ (r'#([0-9a-f]{2}){1,2}(?!\S)', Number.Hex), # integer
+ (r'"\S+', String), # raw string
+ (r"'\S(?!\S)", String.Char), # raw char
+ (r'([0-9a-f]{2}){1,2}(?!\S)', Literal), # raw integer
+ (r'[|$][0-9a-f]{1,4}(?!\S)', Keyword.Declaration), # abs/rel pad
+ (r'%\S+', Name.Decorator), # macro
+ (r'@\S+', Name.Function), # label
+ (r'&\S+', Name.Label), # sublabel
+ (r'/\S+', Name.Tag), # spacer
+ (r'\.\S+', Name.Variable.Magic), # zero page addr
+ (r',\S+', Name.Variable.Instance), # rel addr
+ (r';\S+', Name.Variable.Global), # abs addr
+ (r':\S+', Literal), # raw addr
+ (r'~\S+', Keyword.Namespace), # include
+ (r'\S+', Name),
+ ]
+ }
+
+ def analyse_text(text):
+ return '|0100' in text[:500]
diff --git a/pygments/lexers/tcl.py b/pygments/lexers/tcl.py
new file mode 100644
index 0000000..f29cc2c
--- /dev/null
+++ b/pygments/lexers/tcl.py
@@ -0,0 +1,149 @@
+"""
+ pygments.lexers.tcl
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Tcl and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Whitespace
+from pygments.util import shebang_matches
+
+__all__ = ['TclLexer']
+
+
+class TclLexer(RegexLexer):
+ """
+ For Tcl source code.
+
+ .. versionadded:: 0.10
+ """
+
+ keyword_cmds_re = words((
+ 'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif',
+ 'else', 'error', 'eval', 'expr', 'for', 'foreach', 'global', 'if',
+ 'namespace', 'proc', 'rename', 'return', 'set', 'switch', 'then',
+ 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', 'vwait',
+ 'while'), prefix=r'\b', suffix=r'\b')
+
+ builtin_cmds_re = words((
+ 'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close',
+ 'concat', 'dde', 'dict', 'encoding', 'eof', 'exec', 'exit', 'fblocked',
+ 'fconfigure', 'fcopy', 'file', 'fileevent', 'flush', 'format', 'gets',
+ 'glob', 'history', 'http', 'incr', 'info', 'interp', 'join', 'lappend',
+ 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
+ 'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort',
+ 'mathfunc', 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid',
+ 'pkg::create', 'pkg_mkIndex', 'platform', 'platform::shell', 'puts',
+ 'pwd', 're_syntax', 'read', 'refchan', 'regexp', 'registry', 'regsub',
+ 'scan', 'seek', 'socket', 'source', 'split', 'string', 'subst', 'tell',
+ 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
+
+ name = 'Tcl'
+ url = 'https://www.tcl.tk/about/language.html'
+ aliases = ['tcl']
+ filenames = ['*.tcl', '*.rvt']
+ mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
+
+ def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
+ return [
+ (keyword_cmds_re, Keyword, 'params' + context),
+ (builtin_cmds_re, Name.Builtin, 'params' + context),
+ (r'([\w.-]+)', Name.Variable, 'params' + context),
+ (r'#', Comment, 'comment'),
+ ]
+
+ tokens = {
+ 'root': [
+ include('command'),
+ include('basic'),
+ include('data'),
+ (r'\}', Keyword), # HACK: somehow we miscounted our braces
+ ],
+ 'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
+ 'command-in-brace': _gen_command_rules(keyword_cmds_re,
+ builtin_cmds_re,
+ "-in-brace"),
+ 'command-in-bracket': _gen_command_rules(keyword_cmds_re,
+ builtin_cmds_re,
+ "-in-bracket"),
+ 'command-in-paren': _gen_command_rules(keyword_cmds_re,
+ builtin_cmds_re,
+ "-in-paren"),
+ 'basic': [
+ (r'\(', Keyword, 'paren'),
+ (r'\[', Keyword, 'bracket'),
+ (r'\{', Keyword, 'brace'),
+ (r'"', String.Double, 'string'),
+ (r'(eq|ne|in|ni)\b', Operator.Word),
+ (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
+ ],
+ 'data': [
+ (r'\s+', Whitespace),
+ (r'0x[a-fA-F0-9]+', Number.Hex),
+ (r'0[0-7]+', Number.Oct),
+ (r'\d+\.\d+', Number.Float),
+ (r'\d+', Number.Integer),
+ (r'\$[\w.:-]+', Name.Variable),
+ (r'\$\{[\w.:-]+\}', Name.Variable),
+ (r'[\w.,@:-]+', Text),
+ ],
+ 'params': [
+ (r';', Keyword, '#pop'),
+ (r'\n', Text, '#pop'),
+ (r'(else|elseif|then)\b', Keyword),
+ include('basic'),
+ include('data'),
+ ],
+ 'params-in-brace': [
+ (r'\}', Keyword, ('#pop', '#pop')),
+ include('params')
+ ],
+ 'params-in-paren': [
+ (r'\)', Keyword, ('#pop', '#pop')),
+ include('params')
+ ],
+ 'params-in-bracket': [
+ (r'\]', Keyword, ('#pop', '#pop')),
+ include('params')
+ ],
+ 'string': [
+ (r'\[', String.Double, 'string-square'),
+ (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
+ (r'"', String.Double, '#pop')
+ ],
+ 'string-square': [
+ (r'\[', String.Double, 'string-square'),
+ (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
+ (r'\]', String.Double, '#pop')
+ ],
+ 'brace': [
+ (r'\}', Keyword, '#pop'),
+ include('command-in-brace'),
+ include('basic'),
+ include('data'),
+ ],
+ 'paren': [
+ (r'\)', Keyword, '#pop'),
+ include('command-in-paren'),
+ include('basic'),
+ include('data'),
+ ],
+ 'bracket': [
+ (r'\]', Keyword, '#pop'),
+ include('command-in-bracket'),
+ include('basic'),
+ include('data'),
+ ],
+ 'comment': [
+ (r'.*[^\\]\n', Comment, '#pop'),
+ (r'.*\\\n', Comment),
+ ],
+ }
+
+ def analyse_text(text):
+ return shebang_matches(text, r'(tcl)')
diff --git a/pygments/lexers/teal.py b/pygments/lexers/teal.py
new file mode 100644
index 0000000..dfe6ca7
--- /dev/null
+++ b/pygments/lexers/teal.py
@@ -0,0 +1,89 @@
+"""
+ pygments.lexers.teal
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for TEAL.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, words
+from pygments.token import Comment, Name, Number, String, Text, Keyword, \
+ Whitespace
+
+__all__ = ['TealLexer']
+
+
+class TealLexer(RegexLexer):
+ """
+ For the Transaction Execution Approval Language (TEAL)
+
+ For more information about the grammar, see:
+ https://github.com/algorand/go-algorand/blob/master/data/transactions/logic/assembler.go
+
+ .. versionadded:: 2.9
+ """
+ name = 'teal'
+ url = 'https://developer.algorand.org/docs/reference/teal/specification/'
+ aliases = ['teal']
+ filenames = ['*.teal']
+
+ keywords = words({
+ 'Sender', 'Fee', 'FirstValid', 'FirstValidTime', 'LastValid', 'Note',
+ 'Lease', 'Receiver', 'Amount', 'CloseRemainderTo', 'VotePK',
+ 'SelectionPK', 'VoteFirst', 'VoteLast', 'VoteKeyDilution', 'Type',
+ 'TypeEnum', 'XferAsset', 'AssetAmount', 'AssetSender', 'AssetReceiver',
+ 'AssetCloseTo', 'GroupIndex', 'TxID', 'ApplicationID', 'OnCompletion',
+ 'ApplicationArgs', 'NumAppArgs', 'Accounts', 'NumAccounts',
+ 'ApprovalProgram', 'ClearStateProgram', 'RekeyTo', 'ConfigAsset',
+ 'ConfigAssetTotal', 'ConfigAssetDecimals', 'ConfigAssetDefaultFrozen',
+ 'ConfigAssetUnitName', 'ConfigAssetName', 'ConfigAssetURL',
+ 'ConfigAssetMetadataHash', 'ConfigAssetManager', 'ConfigAssetReserve',
+ 'ConfigAssetFreeze', 'ConfigAssetClawback', 'FreezeAsset',
+ 'FreezeAssetAccount', 'FreezeAssetFrozen',
+ 'NoOp', 'OptIn', 'CloseOut', 'ClearState', 'UpdateApplication',
+ 'DeleteApplication',
+ 'MinTxnFee', 'MinBalance', 'MaxTxnLife', 'ZeroAddress', 'GroupSize',
+ 'LogicSigVersion', 'Round', 'LatestTimestamp', 'CurrentApplicationID',
+ 'AssetBalance', 'AssetFrozen',
+ 'AssetTotal', 'AssetDecimals', 'AssetDefaultFrozen', 'AssetUnitName',
+ 'AssetName', 'AssetURL', 'AssetMetadataHash', 'AssetManager',
+ 'AssetReserve', 'AssetFreeze', 'AssetClawback',
+ }, suffix=r'\b')
+
+ identifier = r'[^ \t\n]+(?=\/\/)|[^ \t\n]+'
+ newline = r'\r?\n'
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ # pragmas match specifically on the space character
+ (r'^#pragma .*' + newline, Comment.Directive),
+ # labels must be followed by a space,
+ # but anything after that is ignored
+ ('(' + identifier + ':' + ')' + '([ \t].*)',
+ bygroups(Name.Label, Comment.Single)),
+ (identifier, Name.Function, 'function-args'),
+ ],
+ 'function-args': [
+ include('whitespace'),
+ (r'"', String, 'string'),
+ (r'(b(?:ase)?(?:32|64) ?)(\(?[a-zA-Z0-9+/=]+\)?)',
+ bygroups(String.Affix, String.Other)),
+ (r'[A-Z2-7]{58}', Number), # address
+ (r'0x[\da-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ (keywords, Keyword),
+ (identifier, Name.Attributes), # branch targets
+ (newline, Text, '#pop'),
+ ],
+ 'string': [
+ (r'\\(?:["nrt\\]|x\d\d)', String.Escape),
+ (r'[^\\\"\n]+', String),
+ (r'"', String, '#pop'),
+ ],
+ 'whitespace': [
+ (r'[ \t]+', Whitespace),
+ (r'//[^\n]+', Comment.Single),
+ ],
+ }
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
new file mode 100644
index 0000000..1fcf708
--- /dev/null
+++ b/pygments/lexers/templates.py
@@ -0,0 +1,2300 @@
+"""
+ pygments.lexers.templates
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various template engines' markup.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexers.html import HtmlLexer, XmlLexer
+from pygments.lexers.javascript import JavascriptLexer, LassoLexer
+from pygments.lexers.css import CssLexer
+from pygments.lexers.php import PhpLexer
+from pygments.lexers.python import PythonLexer
+from pygments.lexers.perl import PerlLexer
+from pygments.lexers.jvm import JavaLexer, TeaLangLexer
+from pygments.lexers.data import YamlLexer
+from pygments.lexers.sql import SqlLexer
+from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
+ include, using, this, default, combined
+from pygments.token import Error, Punctuation, Whitespace, \
+ Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
+from pygments.util import html_doctype_matches, looks_like_xml
+
+__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
+ 'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
+ 'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
+ 'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
+ 'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
+ 'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
+ 'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
+ 'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
+ 'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
+ 'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
+ 'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
+ 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
+ 'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
+ 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
+ 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
+ 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
+ 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
+ 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
+ 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
+ 'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer',
+ 'SqlJinjaLexer']
+
+
+class ErbLexer(Lexer):
+ """
+ Generic ERB (Ruby Templating) lexer.
+
+ Just highlights ruby code between the preprocessor directives, other data
+ is left untouched by the lexer.
+
+ All options are also forwarded to the `RubyLexer`.
+ """
+
+ name = 'ERB'
+ url = 'https://github.com/ruby/erb'
+ aliases = ['erb']
+ mimetypes = ['application/x-ruby-templating']
+
+ _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
+
+ def __init__(self, **options):
+ from pygments.lexers.ruby import RubyLexer
+ self.ruby_lexer = RubyLexer(**options)
+ Lexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ """
+ Since ERB doesn't allow "<%" and other tags inside of ruby
+ blocks we have to use a split approach here that fails for
+ that too.
+ """
+ tokens = self._block_re.split(text)
+ tokens.reverse()
+ state = idx = 0
+ try:
+ while True:
+ # text
+ if state == 0:
+ val = tokens.pop()
+ yield idx, Other, val
+ idx += len(val)
+ state = 1
+ # block starts
+ elif state == 1:
+ tag = tokens.pop()
+ # literals
+ if tag in ('<%%', '%%>'):
+ yield idx, Other, tag
+ idx += 3
+ state = 0
+ # comment
+ elif tag == '<%#':
+ yield idx, Comment.Preproc, tag
+ val = tokens.pop()
+ yield idx + 3, Comment, val
+ idx += 3 + len(val)
+ state = 2
+ # blocks or output
+ elif tag in ('<%', '<%=', '<%-'):
+ yield idx, Comment.Preproc, tag
+ idx += len(tag)
+ data = tokens.pop()
+ r_idx = 0
+ for r_idx, r_token, r_value in \
+ self.ruby_lexer.get_tokens_unprocessed(data):
+ yield r_idx + idx, r_token, r_value
+ idx += len(data)
+ state = 2
+ elif tag in ('%>', '-%>'):
+ yield idx, Error, tag
+ idx += len(tag)
+ state = 0
+ # % raw ruby statements
+ else:
+ yield idx, Comment.Preproc, tag[0]
+ r_idx = 0
+ for r_idx, r_token, r_value in \
+ self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
+ yield idx + 1 + r_idx, r_token, r_value
+ idx += len(tag)
+ state = 0
+ # block ends
+ elif state == 2:
+ tag = tokens.pop()
+ if tag not in ('%>', '-%>'):
+ yield idx, Other, tag
+ else:
+ yield idx, Comment.Preproc, tag
+ idx += len(tag)
+ state = 0
+ except IndexError:
+ return
+
+ def analyse_text(text):
+ if '<%' in text and '%>' in text:
+ return 0.4
+
+
+class SmartyLexer(RegexLexer):
+ """
+ Generic Smarty template lexer.
+
+ Just highlights smarty code between the preprocessor directives, other
+ data is left untouched by the lexer.
+ """
+
+ name = 'Smarty'
+ url = 'https://www.smarty.net/'
+ aliases = ['smarty']
+ filenames = ['*.tpl']
+ mimetypes = ['application/x-smarty']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ (r'[^{]+', Other),
+ (r'(\{)(\*.*?\*)(\})',
+ bygroups(Comment.Preproc, Comment, Comment.Preproc)),
+ (r'(\{php\})(.*?)(\{/php\})',
+ bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
+ Comment.Preproc)),
+ (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
+ bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
+ (r'\{', Comment.Preproc, 'smarty')
+ ],
+ 'smarty': [
+ (r'\s+', Text),
+ (r'\{', Comment.Preproc, '#push'),
+ (r'\}', Comment.Preproc, '#pop'),
+ (r'#[a-zA-Z_]\w*#', Name.Variable),
+ (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
+ (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
+ r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'[a-zA-Z_]\w*', Name.Attribute)
+ ]
+ }
+
+ def analyse_text(text):
+ rv = 0.0
+ if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
+ rv += 0.15
+ if re.search(r'\{include\s+file=.*?\}', text):
+ rv += 0.15
+ if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
+ rv += 0.15
+ if re.search(r'\{\$.*?\}', text):
+ rv += 0.01
+ return rv
+
+
+class VelocityLexer(RegexLexer):
+ """
+ Generic Velocity template lexer.
+
+ Just highlights velocity directives and variable references, other
+ data is left untouched by the lexer.
+ """
+
+ name = 'Velocity'
+ url = 'https://velocity.apache.org/'
+ aliases = ['velocity']
+ filenames = ['*.vm', '*.fhtml']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ identifier = r'[a-zA-Z_]\w*'
+
+ tokens = {
+ 'root': [
+ (r'[^{#$]+', Other),
+ (r'(#)(\*.*?\*)(#)',
+ bygroups(Comment.Preproc, Comment, Comment.Preproc)),
+ (r'(##)(.*?$)',
+ bygroups(Comment.Preproc, Comment)),
+ (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
+ bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
+ 'directiveparams'),
+ (r'(#\{?)(' + identifier + r')(\}|\b)',
+ bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
+ (r'\$!?\{?', Punctuation, 'variable')
+ ],
+ 'variable': [
+ (identifier, Name.Variable),
+ (r'\(', Punctuation, 'funcparams'),
+ (r'(\.)(' + identifier + r')',
+ bygroups(Punctuation, Name.Variable), '#push'),
+ (r'\}', Punctuation, '#pop'),
+ default('#pop')
+ ],
+ 'directiveparams': [
+ (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
+ Operator),
+ (r'\[', Operator, 'rangeoperator'),
+ (r'\b' + identifier + r'\b', Name.Function),
+ include('funcparams')
+ ],
+ 'rangeoperator': [
+ (r'\.\.', Operator),
+ include('funcparams'),
+ (r'\]', Operator, '#pop')
+ ],
+ 'funcparams': [
+ (r'\$!?\{?', Punctuation, 'variable'),
+ (r'\s+', Text),
+ (r'[,:]', Punctuation),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+ (r"\b[0-9]+\b", Number),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'\(', Punctuation, '#push'),
+ (r'\)', Punctuation, '#pop'),
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ (r'\[', Punctuation, '#push'),
+ (r'\]', Punctuation, '#pop'),
+ ]
+ }
+
+ def analyse_text(text):
+ rv = 0.0
+ if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text, re.DOTALL):
+ rv += 0.25
+ if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
+ rv += 0.15
+ if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
+ rv += 0.15
+ if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
+ r'(\.\w+(\([^)]*\))?)*\}?', text):
+ rv += 0.01
+ return rv
+
+
+class VelocityHtmlLexer(DelegatingLexer):
+ """
+ Subclass of the `VelocityLexer` that highlights unlexed data
+ with the `HtmlLexer`.
+
+ """
+
+ name = 'HTML+Velocity'
+ aliases = ['html+velocity']
+ alias_filenames = ['*.html', '*.fhtml']
+ mimetypes = ['text/html+velocity']
+
+ def __init__(self, **options):
+ super().__init__(HtmlLexer, VelocityLexer, **options)
+
+
+class VelocityXmlLexer(DelegatingLexer):
+ """
+ Subclass of the `VelocityLexer` that highlights unlexed data
+ with the `XmlLexer`.
+
+ """
+
+ name = 'XML+Velocity'
+ aliases = ['xml+velocity']
+ alias_filenames = ['*.xml', '*.vm']
+ mimetypes = ['application/xml+velocity']
+
+ def __init__(self, **options):
+ super().__init__(XmlLexer, VelocityLexer, **options)
+
+ def analyse_text(text):
+ rv = VelocityLexer.analyse_text(text) - 0.01
+ if looks_like_xml(text):
+ rv += 0.4
+ return rv
+
+
+class DjangoLexer(RegexLexer):
+ """
+ Generic `django <http://www.djangoproject.com/documentation/templates/>`_
+ and `jinja <https://jinja.pocoo.org/jinja/>`_ template lexer.
+
+ It just highlights django/jinja code between the preprocessor directives,
+ other data is left untouched by the lexer.
+ """
+
+ name = 'Django/Jinja'
+ aliases = ['django', 'jinja']
+ mimetypes = ['application/x-django-templating', 'application/x-jinja']
+
+ flags = re.M | re.S
+
+ tokens = {
+ 'root': [
+ (r'[^{]+', Other),
+ (r'\{\{', Comment.Preproc, 'var'),
+ # jinja/django comments
+ (r'\{#.*?#\}', Comment),
+ # django comments
+ (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
+ r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
+ bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
+ Comment, Comment.Preproc, Text, Keyword, Text,
+ Comment.Preproc)),
+ # raw jinja blocks
+ (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
+ r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
+ bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
+ Text, Comment.Preproc, Text, Keyword, Text,
+ Comment.Preproc)),
+ # filter blocks
+ (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
+ bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
+ 'block'),
+ (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
+ bygroups(Comment.Preproc, Text, Keyword), 'block'),
+ (r'\{', Other)
+ ],
+ 'varnames': [
+ (r'(\|)(\s*)([a-zA-Z_]\w*)',
+ bygroups(Operator, Text, Name.Function)),
+ (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
+ bygroups(Keyword, Text, Keyword, Text, Name.Function)),
+ (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
+ (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
+ r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
+ Keyword),
+ (r'(loop|block|super|forloop)\b', Name.Builtin),
+ (r'[a-zA-Z_][\w-]*', Name.Variable),
+ (r'\.\w+', Name.Variable),
+ (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
+ (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
+ r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+ ],
+ 'var': [
+ (r'\s+', Text),
+ (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
+ include('varnames')
+ ],
+ 'block': [
+ (r'\s+', Text),
+ (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
+ include('varnames'),
+ (r'.', Punctuation)
+ ]
+ }
+
+ def analyse_text(text):
+ rv = 0.0
+ if re.search(r'\{%\s*(block|extends)', text) is not None:
+ rv += 0.4
+ if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
+ rv += 0.1
+ if re.search(r'\{\{.*?\}\}', text) is not None:
+ rv += 0.1
+ return rv
+
+
class MyghtyLexer(RegexLexer):
    """
    Generic myghty templates lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.6
    """

    name = 'Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            # <%def>/<%method> blocks; the body is re-lexed with this lexer.
            # NOTE(review): the \2 backreference points at the (\s*) group,
            # not at def/method -- confirm the closing-tag match is intended.
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            # any other <%word> block; its body is lexed as Python
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            # component calls: <& ... &> (inline) and <&| ... &> (with body)
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            # <% ... %> / <%! ... %> substitutions hold Python code
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            # a line starting with '#' is a comment line
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            # a line starting with '%' is an embedded Python line
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            # everything up to the next markup marker is plain template data
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }
+
+
class MyghtyHtmlLexer(DelegatingLexer):
    """
    `MyghtyLexer` variant that runs the `HtmlLexer` over everything the
    Myghty lexer leaves untokenized.

    .. versionadded:: 0.6
    """

    mimetypes = ['text/html+myghty']
    aliases = ['html+myghty']
    name = 'HTML+Myghty'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, MyghtyLexer, **options)
+
+
class MyghtyXmlLexer(DelegatingLexer):
    """
    `MyghtyLexer` variant that hands everything outside Myghty markup to
    the `XmlLexer`.

    .. versionadded:: 0.6
    """

    mimetypes = ['application/xml+myghty']
    aliases = ['xml+myghty']
    name = 'XML+Myghty'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, MyghtyLexer, **options)
+
+
class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    .. versionadded:: 0.6
    """

    name = 'JavaScript+Myghty'
    aliases = ['javascript+myghty', 'js+myghty']
    # Fixed typo: the last entry previously read 'text/javascript+mygthy'
    # ("mygthy"), which no client would ever send.
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, MyghtyLexer, **options)
+
+
class MyghtyCssLexer(DelegatingLexer):
    """
    `MyghtyLexer` variant that hands everything outside Myghty markup to
    the `CssLexer`.

    .. versionadded:: 0.6
    """

    mimetypes = ['text/css+myghty']
    aliases = ['css+myghty']
    name = 'CSS+Myghty'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, MyghtyLexer, **options)
+
+
class MasonLexer(RegexLexer):
    """
    Generic mason templates lexer. Stolen from Myghty lexer. Code that isn't
    Mason markup is HTML.

    .. versionadded:: 1.4
    """
    name = 'Mason'
    url = 'http://www.masonhq.com/'
    aliases = ['mason']
    filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
    mimetypes = ['application/x-mason']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # <%doc> blocks hold documentation, not code
            (r'(?s)(<%doc>)(.*?)(</%doc>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            # NOTE(review): as in MyghtyLexer, \2 backreferences the
            # whitespace group here -- confirm the closing tag really matches.
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Whitespace, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            # other named blocks contain Perl; here \2 is the block name
            (r'(?s)(<%(\w+)(.*?)(>))(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, None, None, None, using(PerlLexer), Name.Tag)),
            # component calls: <& ... &> (inline) and <&| ... &> (with body)
            (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            # <% ... %> / <%! ... %> substitutions hold Perl code
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
            # full-line comments and embedded Perl lines
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PerlLexer), Other)),
            # remaining text up to the next marker is delegated to HtmlLexer
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(using(HtmlLexer), Operator)),
        ]
    }

    def analyse_text(text):
        # closing tags of the common top-level blocks are decisive;
        # a component call is only weak evidence
        result = 0.0
        if re.search(r'</%(class|doc|init)>', text) is not None:
            result = 1.0
        elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
            result = 0.11
        return result
+
+
class MakoLexer(RegexLexer):
    """
    Generic mako templates lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.7
    """

    name = 'Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            # '% endif' style terminators are keywords, not Python code
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, Keyword, Other)),
            # any other '%' control line is embedded Python
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, using(PythonLexer), Other)),
            # '##' line comments
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Single, Text.Whitespace)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Multiline),
            # opening/closing template tags like <%def ...> / </%def>
            (r'(<%)([\w.:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w.:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
            # <% ... %> / <%! ... %> code blocks hold Python
            (r'(?s)(<%(?:!?))(.*?)(%>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # ${...} expression substitution
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # plain data up to the next Mako construct
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)(\s*)(".*?")',
             bygroups(Name.Attribute, Text, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        # NOTE(review): no rule in this lexer pushes 'attr'; the state looks
        # unused -- kept verbatim, confirm against the full file history.
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
+
+
class MakoHtmlLexer(DelegatingLexer):
    """
    `MakoLexer` variant that runs the `HtmlLexer` over everything the
    Mako lexer leaves untokenized.

    .. versionadded:: 0.7
    """

    mimetypes = ['text/html+mako']
    aliases = ['html+mako']
    name = 'HTML+Mako'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, MakoLexer, **options)
+
+
class MakoXmlLexer(DelegatingLexer):
    """
    `MakoLexer` variant that hands everything outside Mako markup to
    the `XmlLexer`.

    .. versionadded:: 0.7
    """

    mimetypes = ['application/xml+mako']
    aliases = ['xml+mako']
    name = 'XML+Mako'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, MakoLexer, **options)
+
+
class MakoJavascriptLexer(DelegatingLexer):
    """
    `MakoLexer` variant that hands everything outside Mako markup to
    the `JavascriptLexer`.

    .. versionadded:: 0.7
    """

    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']
    aliases = ['javascript+mako', 'js+mako']
    name = 'JavaScript+Mako'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, MakoLexer, **options)
+
+
class MakoCssLexer(DelegatingLexer):
    """
    `MakoLexer` variant that hands everything outside Mako markup to
    the `CssLexer`.

    .. versionadded:: 0.7
    """

    mimetypes = ['text/css+mako']
    aliases = ['css+mako']
    name = 'CSS+Mako'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, MakoLexer, **options)
+
+
+# Genshi and Cheetah lexers courtesy of Matt Good.
+
class CheetahPythonLexer(Lexer):
    """
    Lexer for Python code as it appears inside Cheetah templates: plain
    Python, except that a stray ``$`` (an error to the normal Python
    lexer) is retagged as `Comment.Preproc`.
    """

    def get_tokens_unprocessed(self, text):
        # Run the ordinary Python lexer and patch up its '$' error tokens.
        inner = PythonLexer(**self.options)
        for index, token, value in inner.get_tokens_unprocessed(text):
            if token == Token.Error and value == '$':
                token = Comment.Preproc
            yield index, token, value
+
+
class CheetahLexer(RegexLexer):
    """
    Generic cheetah templates lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_ which use the same syntax.

    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            # '##' single-line comments
            (r'(##[^\n]*)$',
             (bygroups(Comment))),
            # '#* ... *#' multi-line comments
            (r'#[*](.|\n)*?[*]#', Comment),
            # '#end ...' and '#slurp' directives carry no code
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            # any other '#directive ...'; the argument part is Python
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             (bygroups(Comment.Preproc, using(CheetahPythonLexer),
                       Comment.Preproc))),
            # TODO support other Python syntax like $foo['bar']
            # $name and $name.attr placeholders
            (r'(\$)([a-zA-Z_][\w.]*\w)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            # ${expr} / ${!expr} placeholders
            (r'(?s)(\$\{!?)(.*?)(\})',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            # everything else up to the next '#' or '$' marker is plain data
            (r'''(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?=\#[#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) | # a substitution
                 \Z                 # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }
+
+
class CheetahHtmlLexer(DelegatingLexer):
    """
    `CheetahLexer` variant that runs the `HtmlLexer` over everything the
    Cheetah lexer leaves untokenized.
    """

    mimetypes = ['text/html+cheetah', 'text/html+spitfire']
    aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
    name = 'HTML+Cheetah'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, CheetahLexer, **options)
+
+
class CheetahXmlLexer(DelegatingLexer):
    """
    `CheetahLexer` variant that hands everything outside Cheetah markup
    to the `XmlLexer`.
    """

    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
    aliases = ['xml+cheetah', 'xml+spitfire']
    name = 'XML+Cheetah'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, CheetahLexer, **options)
+
+
class CheetahJavascriptLexer(DelegatingLexer):
    """
    `CheetahLexer` variant that hands everything outside Cheetah markup
    to the `JavascriptLexer`.
    """

    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']
    aliases = ['javascript+cheetah', 'js+cheetah',
               'javascript+spitfire', 'js+spitfire']
    name = 'JavaScript+Cheetah'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, CheetahLexer, **options)
+
+
class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights genshi text templates.
    """

    name = 'Genshi Text'
    url = 'http://genshi.edgewall.org/'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            # plain text without '#'/'$' markers
            (r'[^#$\s]+', Other),
            # '##' comment lines and '#' directive lines
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            # leftover single '#'/'$'/whitespace chars stay plain data
            (r'[#$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            # def/for/if arguments are plain Python expressions
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            # bare keywords without arguments
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            # 'end...' closes a directive; trailing text is only a comment
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
        ],
        'variable': [
            # ${expr} substitution (a preceding '$' escapes it)
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # bare $name substitution
            (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
             Name.Variable),
        ]
    }
+
+
class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<$]+', Other),
            # <?python ... ?> processing instructions hold Python code
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            # py: namespaced tags get their own attribute handling
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            # every attribute of a py: tag holds a Python expression
            (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            # py:-prefixed attributes hold Python; the rest are plain values
            (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            # NOTE(review): this rule can never fire -- the "'" rule above
            # already pops; kept verbatim.
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            # ${expr} substitution (a preceding '$' escapes it)
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # bare $name substitution
            (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
             Name.Variable),
        ]
    }
+
+
class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer for HTML templates written for `genshi
    <http://genshi.edgewall.org/>`_ or `kid <http://kid-templating.org/>`_.
    """

    mimetypes = ['text/html+genshi']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    aliases = ['html+genshi', 'html+kid']
    name = 'HTML+Genshi'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        # each Genshi marker found adds a fixed bonus on top of the HTML
        # score, slightly damped so plain HTML wins a tie
        score = 0.0
        for marker in (r'\$\{.*?\}', r'py:(.*?)=["\']'):
            if re.search(marker, text) is not None:
                score += 0.2
        return score + HtmlLexer.analyse_text(text) - 0.01
+
+
class GenshiLexer(DelegatingLexer):
    """
    A lexer for XML templates written for `genshi
    <http://genshi.edgewall.org/>`_ or `kid <http://kid-templating.org/>`_.
    """

    mimetypes = ['application/x-genshi', 'application/x-kid']
    alias_filenames = ['*.xml']
    filenames = ['*.kid']
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    name = 'Genshi'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        # each Genshi marker found adds a fixed bonus on top of the XML
        # score, slightly damped so plain XML wins a tie
        score = 0.0
        for marker in (r'\$\{.*?\}', r'py:(.*?)=["\']'):
            if re.search(marker, text) is not None:
                score += 0.2
        return score + XmlLexer.analyse_text(text) - 0.01
+
+
class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer for javascript code embedded in genshi text templates.
    """

    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']
    alias_filenames = ['*.js']
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    name = 'JavaScript+Genshi Text'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        # reuse the Genshi score, damped below the plain XML/HTML variants
        score = GenshiLexer.analyse_text(text)
        return score - 0.05
+
+
class CssGenshiLexer(DelegatingLexer):
    """
    A lexer for CSS definitions embedded in genshi text templates.
    """

    mimetypes = ['text/css+genshi']
    alias_filenames = ['*.css']
    aliases = ['css+genshitext', 'css+genshi']
    name = 'CSS+Genshi Text'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        # reuse the Genshi score, damped below the plain XML/HTML variants
        score = GenshiLexer.analyse_text(text)
        return score - 0.05
+
+
class RhtmlLexer(DelegatingLexer):
    """
    ERB lexer variant that runs the `HtmlLexer` over everything outside
    the ERB tags.

    Nested Javascript and CSS is highlighted too.
    """

    mimetypes = ['text/html+ruby']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    filenames = ['*.rhtml']
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    name = 'RHTML'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        base = ErbLexer.analyse_text(text) - 0.01
        # an HTML doctype is strong evidence for the HTML flavour
        # (one more than the XmlErbLexer returns)
        return base + 0.5 if html_doctype_matches(text) else base
+
+
class XmlErbLexer(DelegatingLexer):
    """
    `ErbLexer` variant that runs the `XmlLexer` over everything outside
    the ERB preprocessor directives.
    """

    mimetypes = ['application/xml+ruby']
    alias_filenames = ['*.xml']
    aliases = ['xml+ruby', 'xml+erb']
    name = 'XML+Ruby'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        base = ErbLexer.analyse_text(text) - 0.01
        # an XML prolog is strong evidence for the XML flavour
        return base + 0.4 if looks_like_xml(text) else base
+
+
class CssErbLexer(DelegatingLexer):
    """
    `ErbLexer` variant that hands everything outside ERB tags to the
    `CssLexer`.
    """

    mimetypes = ['text/css+ruby']
    alias_filenames = ['*.css']
    aliases = ['css+ruby', 'css+erb']
    name = 'CSS+Ruby'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        # defer to ERB scoring, damped below the HTML/XML variants
        score = ErbLexer.analyse_text(text)
        return score - 0.05
+
+
class JavascriptErbLexer(DelegatingLexer):
    """
    `ErbLexer` variant that hands everything outside ERB tags to the
    `JavascriptLexer`.
    """

    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']
    alias_filenames = ['*.js']
    aliases = ['javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb']
    name = 'JavaScript+Ruby'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, ErbLexer, **options)

    def analyse_text(text):
        # defer to ERB scoring, damped below the HTML/XML variants
        score = ErbLexer.analyse_text(text)
        return score - 0.05
+
+
class HtmlPhpLexer(DelegatingLexer):
    """
    `PhpLexer` variant that runs the `HtmlLexer` over everything outside
    the PHP tags.

    Nested Javascript and CSS is highlighted too.
    """

    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    filenames = ['*.phtml']
    aliases = ['html+php']
    name = 'HTML+PHP'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        base = PhpLexer.analyse_text(text) - 0.01
        # an HTML doctype is strong evidence for the HTML flavour
        return base + 0.5 if html_doctype_matches(text) else base
+
+
class XmlPhpLexer(DelegatingLexer):
    """
    `PhpLexer` variant that runs the `XmlLexer` over everything outside
    the PHP tags.
    """

    mimetypes = ['application/xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    aliases = ['xml+php']
    name = 'XML+PHP'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        base = PhpLexer.analyse_text(text) - 0.01
        # an XML prolog is strong evidence for the XML flavour
        return base + 0.4 if looks_like_xml(text) else base
+
+
class CssPhpLexer(DelegatingLexer):
    """
    `PhpLexer` variant that hands everything outside PHP tags to the
    `CssLexer`.
    """

    mimetypes = ['text/css+php']
    alias_filenames = ['*.css']
    aliases = ['css+php']
    name = 'CSS+PHP'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        # defer to PHP scoring, damped below the HTML/XML variants
        score = PhpLexer.analyse_text(text)
        return score - 0.05
+
+
class JavascriptPhpLexer(DelegatingLexer):
    """
    `PhpLexer` variant that hands everything outside PHP tags to the
    `JavascriptLexer`.
    """

    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']
    alias_filenames = ['*.js']
    aliases = ['javascript+php', 'js+php']
    name = 'JavaScript+PHP'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, PhpLexer, **options)

    def analyse_text(text):
        # NOTE(review): unlike the CSS variant, no damping is applied here
        # -- confirm that is intended.
        score = PhpLexer.analyse_text(text)
        return score
+
+
class HtmlSmartyLexer(DelegatingLexer):
    """
    `SmartyLexer` variant that runs the `HtmlLexer` over everything
    outside the Smarty tags.

    Nested Javascript and CSS is highlighted too.
    """

    mimetypes = ['text/html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    aliases = ['html+smarty']
    name = 'HTML+Smarty'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        base = SmartyLexer.analyse_text(text) - 0.01
        # an HTML doctype is strong evidence for the HTML flavour
        return base + 0.5 if html_doctype_matches(text) else base
+
+
class XmlSmartyLexer(DelegatingLexer):
    """
    `SmartyLexer` variant that runs the `XmlLexer` over everything
    outside the Smarty tags.
    """

    mimetypes = ['application/xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    aliases = ['xml+smarty']
    name = 'XML+Smarty'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        base = SmartyLexer.analyse_text(text) - 0.01
        # an XML prolog is strong evidence for the XML flavour
        return base + 0.4 if looks_like_xml(text) else base
+
+
class CssSmartyLexer(DelegatingLexer):
    """
    `SmartyLexer` variant that hands everything outside Smarty tags to
    the `CssLexer`.
    """

    mimetypes = ['text/css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    aliases = ['css+smarty']
    name = 'CSS+Smarty'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        # defer to Smarty scoring, damped below the HTML/XML variants
        score = SmartyLexer.analyse_text(text)
        return score - 0.05
+
+
class JavascriptSmartyLexer(DelegatingLexer):
    """
    `SmartyLexer` variant that hands everything outside Smarty tags to
    the `JavascriptLexer`.
    """

    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    aliases = ['javascript+smarty', 'js+smarty']
    name = 'JavaScript+Smarty'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, SmartyLexer, **options)

    def analyse_text(text):
        # defer to Smarty scoring, damped below the HTML/XML variants
        score = SmartyLexer.analyse_text(text)
        return score - 0.05
+
+
class HtmlDjangoLexer(DelegatingLexer):
    """
    `DjangoLexer` variant that runs the `HtmlLexer` over everything
    outside the Django/Jinja tags.

    Nested Javascript and CSS is highlighted too.
    """

    mimetypes = ['text/html+django', 'text/html+jinja']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    filenames = ['*.html.j2', '*.htm.j2', '*.xhtml.j2',
                 '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2']
    aliases = ['html+django', 'html+jinja', 'htmldjango']
    name = 'HTML+Django/Jinja'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        base = DjangoLexer.analyse_text(text) - 0.01
        # an HTML doctype is strong evidence for the HTML flavour
        return base + 0.5 if html_doctype_matches(text) else base
+
+
class XmlDjangoLexer(DelegatingLexer):
    """
    `DjangoLexer` variant that runs the `XmlLexer` over everything
    outside the Django/Jinja tags.
    """

    mimetypes = ['application/xml+django', 'application/xml+jinja']
    alias_filenames = ['*.xml']
    filenames = ['*.xml.j2', '*.xml.jinja2']
    aliases = ['xml+django', 'xml+jinja']
    name = 'XML+Django/Jinja'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        base = DjangoLexer.analyse_text(text) - 0.01
        # an XML prolog is strong evidence for the XML flavour
        return base + 0.4 if looks_like_xml(text) else base
+
+
class CssDjangoLexer(DelegatingLexer):
    """
    `DjangoLexer` variant that hands everything outside Django/Jinja
    tags to the `CssLexer`.
    """

    mimetypes = ['text/css+django', 'text/css+jinja']
    alias_filenames = ['*.css']
    filenames = ['*.css.j2', '*.css.jinja2']
    aliases = ['css+django', 'css+jinja']
    name = 'CSS+Django/Jinja'

    def __init__(self, **options):
        # CssLexer handles the non-template remainder.
        super().__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        # defer to Django scoring, damped below the HTML/XML variants
        score = DjangoLexer.analyse_text(text)
        return score - 0.05
+
+
class JavascriptDjangoLexer(DelegatingLexer):
    """
    `DjangoLexer` variant that hands everything outside Django/Jinja
    tags to the `JavascriptLexer`.
    """

    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']
    alias_filenames = ['*.js']
    filenames = ['*.js.j2', '*.js.jinja2']
    aliases = ['javascript+django', 'js+django',
               'javascript+jinja', 'js+jinja']
    name = 'JavaScript+Django/Jinja'

    def __init__(self, **options):
        # JavascriptLexer handles the non-template remainder.
        super().__init__(JavascriptLexer, DjangoLexer, **options)

    def analyse_text(text):
        # defer to Django scoring, damped below the HTML/XML variants
        score = DjangoLexer.analyse_text(text)
        return score - 0.05
+
+
class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for area outside of
    JSP tags.

    .. versionadded:: 0.7
    """

    tokens = {
        'root': [
            # '<%', '<%=', '<%!', ... open a scriptlet section
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            # everything up to the next '<' is plain template data
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            # the scriptlet body up to (but not including) '%>' is Java
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }
+
+
class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages: `JspRootLexer` for the scriptlets,
    `XmlLexer` for everything else.

    .. versionadded:: 0.7
    """
    mimetypes = ['application/x-jsp']
    filenames = ['*.jsp']
    aliases = ['jsp']
    name = 'Java Server Page'

    def __init__(self, **options):
        # XmlLexer handles the non-scriptlet remainder.
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        score = JavaLexer.analyse_text(text) - 0.01
        score += 0.4 if looks_like_xml(text) else 0.0
        score += 0.1 if ('<%' in text and '%>' in text) else 0.0
        return score
+
+
class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    .. versionadded:: 1.1
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            # '#[' nests; push another comment level
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }

    def analyse_text(text):
        """Evoque templates use $evoque, which is unique."""
        # Previously the function fell off the end and returned None when
        # the marker was absent; return an explicit float score instead.
        return 1.0 if '$evoque' in text else 0.0
+
class EvoqueHtmlLexer(DelegatingLexer):
    """
    `EvoqueLexer` variant that runs the `HtmlLexer` over everything
    outside the Evoque directives.

    .. versionadded:: 1.1
    """
    mimetypes = ['text/html+evoque']
    filenames = ['*.html']
    aliases = ['html+evoque']
    name = 'HTML+Evoque'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        # scoring is identical to the plain Evoque lexer
        return EvoqueLexer.analyse_text(text)
+
+
class EvoqueXmlLexer(DelegatingLexer):
    """
    `EvoqueLexer` variant that runs the `XmlLexer` over everything
    outside the Evoque directives.

    .. versionadded:: 1.1
    """
    mimetypes = ['application/xml+evoque']
    filenames = ['*.xml']
    aliases = ['xml+evoque']
    name = 'XML+Evoque'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        # scoring is identical to the plain Evoque lexer
        return EvoqueLexer.analyse_text(text)
+
+
class ColdfusionLexer(RegexLexer):
    """
    Coldfusion statements
    """
    name = 'cfstatement'
    aliases = ['cfs']
    filenames = []
    mimetypes = []
    # CFML keywords and operators are case-insensitive
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'//.*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'\+\+|--', Operator),
            (r'[-+*/^&=!]', Operator),
            (r'<=|>=|<|>|==', Operator),
            # word-style comparison/logic operators
            (r'mod\b', Operator),
            (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
            (r'\|\||&&', Operator),
            (r'\?', Operator),
            (r'"', String.Double, 'string'),
            # There is a special rule for allowing html in single quoted
            # strings, evidently.
            (r"'.*?'", String.Single),
            (r'\d+', Number),
            (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
             r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
             r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            # built-in scopes
            (r'(application|session|client|cookie|super|this|variables|arguments)\b',
             Name.Constant),
            # identifier followed by '(' is a function call
            (r'([a-z_$][\w.]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-z_$][\w.]*', Name.Variable),
            (r'[()\[\]{};:,.\\]', Punctuation),
            (r'\s+', Text),
        ],
        'string': [
            # '""' is an escaped quote inside a double-quoted string
            (r'""', String.Double),
            # '#expr#' interpolation
            (r'#.+?#', String.Interp),
            (r'[^"#]+', String.Double),
            (r'#', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }
+
+
class ColdfusionMarkupLexer(RegexLexer):
    """
    Coldfusion markup only
    """
    name = 'Coldfusion'
    aliases = ['cf']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'[^<]+', Other),
            include('tags'),
            (r'<[^<>]*', Other),
        ],
        'tags': [
            # '<!---' CFML comments nest; handled in their own state
            (r'<!---', Comment.Multiline, 'cfcomment'),
            (r'(?s)<!--.*?-->', Comment),
            (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
            # <cfscript> bodies are CFML statements
            (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
            # negative lookbehind is for strings with embedded >
            (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
             r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
             r'mailpart|mail|header|content|zip|image|lock|argument|try|'
             r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
        ],
        'cfoutput': [
            (r'[^#<]+', Other),
            # '#expr#' interpolation inside <cfoutput>
            (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
                                      Punctuation)),
            # (r'<cfoutput.*?>', Name.Builtin, '#push'),
            (r'</cfoutput.*?>', Name.Builtin, '#pop'),
            include('tags'),
            (r'(?s)<[^<>]*', Other),
            (r'#', Other),
        ],
        'cfcomment': [
            # nested '<!---' pushes another comment level
            (r'<!---', Comment.Multiline, '#push'),
            (r'--->', Comment.Multiline, '#pop'),
            (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
        ],
    }
+
+
class ColdfusionHtmlLexer(DelegatingLexer):
    """
    Coldfusion markup embedded in HTML: `ColdfusionMarkupLexer` for the
    CFML tags, `HtmlLexer` for everything else.
    """
    mimetypes = ['application/x-coldfusion']
    filenames = ['*.cfm', '*.cfml']
    aliases = ['cfm']
    name = 'Coldfusion HTML'

    def __init__(self, **options):
        # HtmlLexer handles the non-CFML remainder.
        super().__init__(HtmlLexer, ColdfusionMarkupLexer, **options)
+
+
class ColdfusionCFCLexer(DelegatingLexer):
    """
    Coldfusion components (*.cfc): `ColdfusionLexer` for the script
    parts, `ColdfusionHtmlLexer` for the markup.

    .. versionadded:: 2.0
    """
    mimetypes = []
    filenames = ['*.cfc']
    aliases = ['cfc']
    name = 'Coldfusion CFC'

    def __init__(self, **options):
        # the HTML-flavoured lexer handles the non-script remainder
        super().__init__(ColdfusionHtmlLexer, ColdfusionLexer, **options)
+
+
class SspLexer(DelegatingLexer):
    """
    Lexer for Scalate Server Pages: `JspRootLexer` for the scriptlets,
    `XmlLexer` for everything else.

    .. versionadded:: 1.4
    """
    mimetypes = ['application/x-ssp']
    filenames = ['*.ssp']
    aliases = ['ssp']
    name = 'Scalate Server Page'

    def __init__(self, **options):
        # XmlLexer handles the non-scriptlet remainder.
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        # Scala-style 'val name:' declarations are the strongest hint
        score = 0.6 if re.search(r'val \w+\s*:', text) else 0.0
        score += 0.2 if looks_like_xml(text) else 0.0
        score += 0.1 if ('<%' in text and '%>' in text) else 0.0
        return score
+
+
class TeaTemplateRootLexer(RegexLexer):
    """
    Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
    code blocks.

    .. versionadded:: 1.5
    """

    tokens = {
        'root': [
            # '<%' (optionally followed by one marker char) opens a code block
            (r'<%\S?', Keyword, 'sec'),
            # everything up to the next '<' is plain template data
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            # the block body up to (but not including) '%>' is Tea code
            (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
        ],
    }
+
+
class TeaTemplateLexer(DelegatingLexer):
    """
    Lexer for `Tea Templates <http://teatrove.org/>`_:
    `TeaTemplateRootLexer` for the code blocks, `XmlLexer` for the rest.

    .. versionadded:: 1.5
    """
    mimetypes = ['text/x-tea']
    filenames = ['*.tea']
    aliases = ['tea']
    name = 'Tea'

    def __init__(self, **options):
        # XmlLexer handles the non-code remainder.
        super().__init__(XmlLexer, TeaTemplateRootLexer, **options)

    def analyse_text(text):
        score = TeaLangLexer.analyse_text(text) - 0.01
        score += 0.4 if looks_like_xml(text) else 0.0
        score += 0.1 if ('<%' in text and '%>' in text) else 0.0
        return score
+
+
class LassoHtmlLexer(DelegatingLexer):
    """
    `LassoLexer` variant that runs the `HtmlLexer` over everything the
    Lasso lexer leaves untokenized.

    Nested JavaScript and CSS is also highlighted.

    .. versionadded:: 1.6
    """

    mimetypes = ['text/html+lasso',
                 'application/x-httpd-lasso',
                 'application/x-httpd-lasso[89]']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    aliases = ['html+lasso']
    name = 'HTML+Lasso'

    def __init__(self, **options):
        # HtmlLexer handles the non-template remainder.
        super().__init__(HtmlLexer, LassoLexer, **options)

    def analyse_text(text):
        base = LassoLexer.analyse_text(text) - 0.01
        # an HTML doctype is strong evidence (same as HTML lexer)
        return base + 0.5 if html_doctype_matches(text) else base
+
+
class LassoXmlLexer(DelegatingLexer):
    """
    `LassoLexer` variant that runs the `XmlLexer` over everything the
    Lasso lexer leaves untokenized.

    .. versionadded:: 1.6
    """

    mimetypes = ['application/xml+lasso']
    alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    aliases = ['xml+lasso']
    name = 'XML+Lasso'

    def __init__(self, **options):
        # XmlLexer handles the non-template remainder.
        super().__init__(XmlLexer, LassoLexer, **options)

    def analyse_text(text):
        base = LassoLexer.analyse_text(text) - 0.01
        # an XML prolog is strong evidence for the XML flavour
        return base + 0.4 if looks_like_xml(text) else base
+
+
class LassoCssLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `CssLexer`.

    .. versionadded:: 1.6
    """

    name = 'CSS+Lasso'
    aliases = ['css+lasso']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+lasso']

    def __init__(self, **options):
        # Inside CSS, Lasso code must be explicitly delimited.
        options['requiredelimiters'] = True
        super().__init__(CssLexer, LassoLexer, **options)

    def analyse_text(text):
        score = LassoLexer.analyse_text(text) - 0.05
        # 'property: value;' declarations suggest CSS.
        if re.search(r'\w+:[^;]+;', text):
            score += 0.1
        if 'padding:' in text:
            score += 0.1
        return score
+
+
class LassoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `JavascriptLexer`.

    .. versionadded:: 1.6
    """

    name = 'JavaScript+Lasso'
    aliases = ['javascript+lasso', 'js+lasso']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+lasso',
                 'text/x-javascript+lasso',
                 'text/javascript+lasso']

    def __init__(self, **options):
        # Inside JavaScript, Lasso code must be explicitly delimited.
        options['requiredelimiters'] = True
        super().__init__(JavascriptLexer, LassoLexer, **options)

    def analyse_text(text):
        # Defer entirely to the Lasso heuristic, slightly reduced.
        return LassoLexer.analyse_text(text) - 0.05
+
+
class HandlebarsLexer(RegexLexer):
    """
    Generic handlebars template lexer.

    Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
    Everything else is left for a delegating lexer.

    .. versionadded:: 2.0
    """

    name = "Handlebars"
    url = 'https://handlebarsjs.com/'
    aliases = ['handlebars']

    tokens = {
        'root': [
            # Plain text up to the next potential tag opener.
            (r'[^{]+', Other),

            # Comment start {{! }} or {{!--
            (r'\{\{!.*\}\}', Comment),

            # HTML Escaping open {{{expression
            (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),

            # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~
            (r'(\{\{)([#~/]+)([^\s}]*)',
             bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'),
            (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
        ],

        'tag': [
            (r'\s+', Text),
            # HTML Escaping close }}}
            (r'\}\}\}', Comment.Special, '#pop'),
            # blockClose}}, includes optional tilde ~
            (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'),

            # {{opt=something}}
            (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)),

            # Partials {{> ...}}
            (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)),
            (r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)),
            (r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation),
             'dynamic-partial'),

            include('generic'),
        ],
        # Parenthesized (sub)expression selecting a partial at runtime,
        # e.g. {{> (lookup . "partialName")}}.
        'dynamic-partial': [
            (r'\s+', Text),
            (r'\)', Punctuation, '#pop'),

            (r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text,
                                                      Name.Variable, Text)),
            (r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text,
                                             using(this, state='variable'))),
            (r'[\w-]+', Name.Function),

            include('generic'),
        ],
        # Bare variable references, including this/ ./ ../ path prefixes.
        'variable': [
            (r'[()/@a-zA-Z][\w-]*', Name.Variable),
            (r'\.[\w-]+', Name.Variable),
            (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable),
        ],
        # Variables plus literal values (strings, numbers).
        'generic': [
            include('variable'),

            # borrowed from DjangoLexer
            # NOTE(review): the literal 'eE' in the exponent part looks like
            # a typo for [eE] (it only matches the two-char sequence "eE");
            # it is inherited verbatim from DjangoLexer — confirm upstream.
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ]
    }
+
+
class HandlebarsHtmlLexer(DelegatingLexer):
    """
    Lexer for Handlebars templates embedded in HTML: data left unlexed by
    the `HandlebarsLexer` is highlighted with the `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML+Handlebars"
    aliases = ["html+handlebars"]
    filenames = ['*.handlebars', '*.hbs']
    mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']

    def __init__(self, **options):
        # Handlebars tags take priority; whatever they leave as Other is
        # re-lexed as HTML.
        super().__init__(HtmlLexer, HandlebarsLexer, **options)
+
+
class YamlJinjaLexer(DelegatingLexer):
    """
    Lexer for Jinja-templated YAML: data left unlexed by the `DjangoLexer`
    (which also covers Jinja syntax) is highlighted with the `YamlLexer`.

    Commonly used in Saltstack salt states.

    .. versionadded:: 2.0
    """

    name = 'YAML+Jinja'
    aliases = ['yaml+jinja', 'salt', 'sls']
    filenames = ['*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2']
    mimetypes = ['text/x-yaml+jinja', 'text/x-sls']

    def __init__(self, **options):
        # Jinja tags take priority; remaining text is lexed as YAML.
        super().__init__(YamlLexer, DjangoLexer, **options)
+
+
class LiquidLexer(RegexLexer):
    """
    Lexer for Liquid templates.

    Handles ``{% ... %}`` tag/block markup and ``{{ ... }}`` output markup
    with filters; everything else is plain text.

    .. versionadded:: 2.0
    """
    name = 'liquid'
    url = 'https://www.rubydoc.info/github/Shopify/liquid'
    aliases = ['liquid']
    filenames = ['*.liquid']

    tokens = {
        'root': [
            (r'[^{]+', Text),
            # tags and block tags
            (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
            # output tags
            (r'(\{\{)(\s*)([^\s}]+)',
             bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
             'output'),
            (r'\{', Text)
        ],

        'tag-or-block': [
            # builtin logic blocks
            (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
            (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
             combined('end-of-block', 'whitespace', 'generic')),
            (r'(else)(\s*)(%\})',
             bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),

            # other builtin blocks
            (r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
                      Whitespace, Punctuation), '#pop'),
            (r'(comment)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
            (r'(raw)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),

            # end of block
            # (the inner capture group of 'end(...)' is skipped with None)
            (r'(end(case|unless|if))(\s*)(%\})',
             bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
            (r'(end([^\s%]+))(\s*)(%\})',
             bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),

            # builtin tags (assign and include are handled together with usual tags)
            (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
             bygroups(Name.Tag, Whitespace,
                      using(this, state='generic'), Punctuation, Whitespace),
             'variable-tag-markup'),

            # other tags or blocks
            (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
        ],

        'output': [
            include('whitespace'),
            (r'\}\}', Punctuation, '#pop'),  # end of output

            (r'\|', Punctuation, 'filters')
        ],

        # chain of '|'-separated filters inside an output tag
        'filters': [
            include('whitespace'),
            (r'\}\}', Punctuation, ('#pop', '#pop')),  # end of filters and output

            (r'([^\s|:]+)(:?)(\s*)',
             bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
        ],

        'filter-markup': [
            (r'\|', Punctuation, '#pop'),
            include('end-of-tag'),
            include('default-param-markup')
        ],

        # condition of an if/unless/elsif/case block
        'condition': [
            include('end-of-block'),
            include('whitespace'),

            (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
             bygroups(using(this, state = 'generic'), Whitespace, Operator,
                      Whitespace, using(this, state = 'generic'), Whitespace,
                      Punctuation)),
            (r'\b!', Operator),
            (r'\bnot\b', Operator.Word),
            (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
             bygroups(using(this, state = 'generic'), Whitespace, Operator.Word,
                      Whitespace, using(this, state = 'generic'))),

            include('generic'),
            include('whitespace')
        ],

        # helper: a single value terminated by whitespace
        'generic-value': [
            include('generic'),
            include('end-at-whitespace')
        ],

        # helper: comparison / containment operators
        'operator': [
            (r'(\s*)((=|!|>|<)=?)(\s*)',
             bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
            (r'(\s*)(\bcontains\b)(\s*)',
             bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
        ],

        'end-of-tag': [
            (r'\}\}', Punctuation, '#pop')
        ],

        'end-of-block': [
            (r'%\}', Punctuation, ('#pop', '#pop'))
        ],

        'end-at-whitespace': [
            (r'\s+', Whitespace, '#pop')
        ],

        # states for unknown markup
        'param-markup': [
            include('whitespace'),
            # params with colons or equals
            (r'([^\s=:]+)(\s*)(=|:)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            # explicit variables
            (r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
             bygroups(Punctuation, Whitespace, using(this, state = 'variable'),
                      Whitespace, Punctuation)),

            include('string'),
            include('number'),
            include('keyword'),
            (r',', Punctuation)
        ],

        'default-param-markup': [
            include('param-markup'),
            (r'.', Text)  # fallback for switches / variables / un-quoted strings / ...
        ],

        'variable-param-markup': [
            include('param-markup'),
            include('variable'),
            (r'.', Text)  # fallback
        ],

        'tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('default-param-markup')
        ],

        'variable-tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('variable-param-markup')
        ],

        # states for different values types
        'keyword': [
            (r'\b(false|true)\b', Keyword.Constant)
        ],

        'variable': [
            (r'[a-zA-Z_]\w*', Name.Variable),
            (r'(?<=\w)\.(?=\w)', Punctuation)
        ],

        'string': [
            (r"'[^']*'", String.Single),
            (r'"[^"]*"', String.Double)
        ],

        'number': [
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer)
        ],

        'generic': [  # decides for variable, string, keyword or number
            include('keyword'),
            include('string'),
            include('number'),
            include('variable')
        ],

        'whitespace': [
            (r'[ \t]+', Whitespace)
        ],

        # states for builtin blocks
        'comment': [
            (r'(\{%)(\s*)(endcomment)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), ('#pop', '#pop')),
            (r'.', Comment)
        ],

        'raw': [
            (r'[^{]+', Text),
            (r'(\{%)(\s*)(endraw)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), '#pop'),
            (r'\{', Text)
        ],
    }
+
+
class TwigLexer(RegexLexer):
    """
    Twig template lexer.

    It just highlights Twig code between the preprocessor directives,
    other data is left untouched by the lexer.

    .. versionadded:: 2.0
    """

    name = 'Twig'
    aliases = ['twig']
    mimetypes = ['application/x-twig']

    flags = re.M | re.S

    # Note that a backslash is included in the following two patterns
    # PHP uses a backslash as a namespace separator
    _ident_char = r'[\\\w-]|[^\x00-\x7f]'
    _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
    _ident_end = r'(?:' + _ident_char + ')*'
    _ident_inner = _ident_begin + _ident_end

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # twig comments
            (r'\{\#.*?\#\}', Comment),
            # raw twig blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks ('%%' escapes the literal '%' for %-formatting)
            (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'tag'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'tag'),
            (r'\{', Other),
        ],
        'varnames': [
            # '| filtername' applications
            (r'(\|)(\s*)(%s)' % _ident_inner,
             bygroups(Operator, Text, Name.Function)),
            # 'is [not] testname' tests
            (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
            # Fix: each continuation line must start with '|' so adjacent
            # keywords are not fused across the implicit string
            # concatenation (previously 'is'+'if' matched only 'isif',
            # 'import'+'constant' only 'importconstant', and
            # 'sameas'+'matches' only 'sameasmatches').
            (r'(in|not|and|b-and|or|b-or|b-xor|is'
             r'|if|elseif|else|import'
             r'|constant|defined|divisibleby|empty|even|iterable|odd|sameas'
             r'|matches|starts\s+with|ends\s+with)\b',
             Keyword),
            (r'(loop|block|parent)\b', Name.Builtin),
            (_ident_inner, Name.Variable),
            (r'\.' + _ident_inner, Name.Variable),
            (r'\.[0-9]+', Number),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
            # number literal (pattern shared with DjangoLexer)
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        # inside a {{ ... }} output expression
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        # inside a {% ... %} tag
        'tag': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation),
        ],
    }
+
+
class TwigHtmlLexer(DelegatingLexer):
    """
    Lexer for Twig templates embedded in HTML: data left unlexed by the
    `TwigLexer` is highlighted with the `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML+Twig"
    aliases = ["html+twig"]
    filenames = ['*.twig']
    mimetypes = ['text/html+twig']

    def __init__(self, **options):
        # Twig directives take priority; remaining text is lexed as HTML.
        super().__init__(HtmlLexer, TwigLexer, **options)
+
+
class Angular2Lexer(RegexLexer):
    """
    Generic angular2 template lexer.

    Highlights only the Angular template tags (stuff between `{{` and `}}` and
    special attributes: '(event)=', '[property]=', '[(twoWayBinding)]=').
    Everything else is left for a delegating lexer.

    .. versionadded:: 2.1
    """

    name = "Angular2"
    url = 'https://angular.io/guide/template-syntax'
    aliases = ['ng2']

    tokens = {
        'root': [
            # Plain text up to the next character that could start Angular
            # markup.
            (r'[^{([*#]+', Other),

            # {{meal.name}}
            (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),

            # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
            (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
             bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
             'attr'),
            # binding attribute without a value
            (r'([([]+)([\w:.-]+)([\])]+)(\s*)',
             bygroups(Punctuation, Name.Attribute, Punctuation, Text)),

            # *ngIf="..."; #f="ngForm"
            (r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
             bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'),
            (r'([*#])([\w:.-]+)(\s*)',
             bygroups(Punctuation, Name.Attribute, Text)),
        ],

        # inside a {{ ... }} interpolation
        'ngExpression': [
            (r'\s+(\|\s+)?', Text),
            (r'\}\}', Comment.Preproc, '#pop'),

            # Literals
            # NOTE(review): String.Boolean is a non-standard token subtype
            # (token types are auto-created on attribute access); also the
            # literal 'eE' in the number pattern looks like a typo for [eE],
            # inherited from DjangoLexer — confirm upstream.
            (r':?(true|false)', String.Boolean),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),

            # Variabletext
            (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
            (r'\.[\w-]+(\(.*\))?', Name.Variable),

            # inline If
            (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
             bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
        ],
        # attribute value after '='; quoted or bare
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
+
+
class Angular2HtmlLexer(DelegatingLexer):
    """
    Lexer for Angular2 templates embedded in HTML: data left unlexed by the
    `Angular2Lexer` is highlighted with the `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML + Angular2"
    aliases = ["html+ng2"]
    filenames = ['*.ng2']

    def __init__(self, **options):
        # Angular tags take priority; remaining text is lexed as HTML.
        super().__init__(HtmlLexer, Angular2Lexer, **options)
+
+
class SqlJinjaLexer(DelegatingLexer):
    """
    Templated SQL lexer.

    .. versionadded:: 2.13
    """

    name = 'SQL+Jinja'
    aliases = ['sql+jinja']
    filenames = ['*.sql', '*.sql.j2', '*.sql.jinja2']

    def __init__(self, **options):
        super().__init__(SqlLexer, DjangoLexer, **options)

    def analyse_text(text):
        score = 0.0
        # dbt's ref function is a strong signal for templated SQL
        if re.search(r'\{\{\s*ref\(.*\)\s*\}\}', text):
            score += 0.4
        # dbt's source function
        if re.search(r'\{\{\s*source\(.*\)\s*\}\}', text):
            score += 0.25
        # a full Jinja macro definition
        macro_pattern = (
            r'\{%-?\s*macro \w+\(.*\)\s*-?%\}\s+.*\s+\{%-?\s*endmacro\s*-?%\}'
        )
        if re.search(macro_pattern, text, re.S):
            score += 0.15
        return score
diff --git a/pygments/lexers/teraterm.py b/pygments/lexers/teraterm.py
new file mode 100644
index 0000000..a4da2a2
--- /dev/null
+++ b/pygments/lexers/teraterm.py
@@ -0,0 +1,326 @@
+"""
+ pygments.lexers.teraterm
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Tera Term macro files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Text, Comment, Operator, Name, String, \
+ Number, Keyword, Error
+
+__all__ = ['TeraTermLexer']
+
+
class TeraTermLexer(RegexLexer):
    """
    For Tera Term macro source code.

    .. versionadded:: 2.4
    """
    name = 'Tera Term macro'
    url = 'https://ttssh2.osdn.jp/'
    aliases = ['teratermmacro', 'teraterm', 'ttl']
    filenames = ['*.ttl']
    mimetypes = ['text/x-teratermmacro']

    tokens = {
        'root': [
            # Order matters: commands and builtin variables must be tried
            # before the catch-all user-variable rule.
            include('comments'),
            include('labels'),
            include('commands'),
            include('builtin-variables'),
            include('user-variables'),
            include('operators'),
            include('numeric-literals'),
            include('string-literals'),
            include('all-whitespace'),
            (r'\S', Text),
        ],
        'comments': [
            # ';' starts a line comment; '/* ... */' is a block comment.
            (r';[^\r\n]*', Comment.Single),
            (r'/\*', Comment.Multiline, 'in-comment'),
        ],
        'in-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline)
        ],
        'labels': [
            # ':labelname' at the start of a line (jump target).
            (r'(?i)^(\s*)(:[a-z0-9_]+)', bygroups(Text.Whitespace, Name.Label)),
        ],
        'commands': [
            (
                r'(?i)\b('
                r'basename|'
                r'beep|'
                r'bplusrecv|'
                r'bplussend|'
                r'break|'
                r'bringupbox|'
                # 'call' is handled separately.
                r'callmenu|'
                r'changedir|'
                r'checksum16|'
                r'checksum16file|'
                r'checksum32|'
                r'checksum32file|'
                r'checksum8|'
                r'checksum8file|'
                r'clearscreen|'
                r'clipb2var|'
                r'closesbox|'
                r'closett|'
                r'code2str|'
                r'connect|'
                r'continue|'
                r'crc16|'
                r'crc16file|'
                r'crc32|'
                r'crc32file|'
                r'cygconnect|'
                r'delpassword|'
                r'dirname|'
                r'dirnamebox|'
                r'disconnect|'
                r'dispstr|'
                r'do|'
                r'else|'
                r'elseif|'
                r'enablekeyb|'
                r'end|'
                r'endif|'
                r'enduntil|'
                r'endwhile|'
                r'exec|'
                r'execcmnd|'
                r'exit|'
                r'expandenv|'
                r'fileclose|'
                r'fileconcat|'
                r'filecopy|'
                r'filecreate|'
                r'filedelete|'
                r'filelock|'
                r'filemarkptr|'
                r'filenamebox|'
                r'fileopen|'
                r'fileread|'
                r'filereadln|'
                r'filerename|'
                r'filesearch|'
                r'fileseek|'
                r'fileseekback|'
                r'filestat|'
                r'filestrseek|'
                r'filestrseek2|'
                r'filetruncate|'
                r'fileunlock|'
                r'filewrite|'
                r'filewriteln|'
                r'findclose|'
                r'findfirst|'
                r'findnext|'
                r'flushrecv|'
                r'foldercreate|'
                r'folderdelete|'
                r'foldersearch|'
                r'for|'
                r'getdate|'
                r'getdir|'
                r'getenv|'
                r'getfileattr|'
                r'gethostname|'
                r'getipv4addr|'
                r'getipv6addr|'
                r'getmodemstatus|'
                r'getpassword|'
                r'getspecialfolder|'
                r'gettime|'
                r'gettitle|'
                r'getttdir|'
                r'getver|'
                # 'goto' is handled separately.
                r'if|'
                r'ifdefined|'
                r'include|'
                r'inputbox|'
                r'int2str|'
                r'intdim|'
                r'ispassword|'
                r'kmtfinish|'
                r'kmtget|'
                r'kmtrecv|'
                r'kmtsend|'
                r'listbox|'
                r'loadkeymap|'
                r'logautoclosemode|'
                r'logclose|'
                r'loginfo|'
                r'logopen|'
                r'logpause|'
                r'logrotate|'
                r'logstart|'
                r'logwrite|'
                r'loop|'
                r'makepath|'
                r'messagebox|'
                r'mpause|'
                r'next|'
                r'passwordbox|'
                r'pause|'
                r'quickvanrecv|'
                r'quickvansend|'
                r'random|'
                r'recvln|'
                r'regexoption|'
                r'restoresetup|'
                r'return|'
                r'rotateleft|'
                r'rotateright|'
                r'scprecv|'
                r'scpsend|'
                r'send|'
                r'sendbreak|'
                r'sendbroadcast|'
                r'sendfile|'
                r'sendkcode|'
                r'sendln|'
                r'sendlnbroadcast|'
                r'sendlnmulticast|'
                r'sendmulticast|'
                r'setbaud|'
                r'setdate|'
                r'setdebug|'
                r'setdir|'
                r'setdlgpos|'
                r'setdtr|'
                r'setecho|'
                r'setenv|'
                r'setexitcode|'
                r'setfileattr|'
                r'setflowctrl|'
                r'setmulticastname|'
                r'setpassword|'
                r'setrts|'
                r'setspeed|'
                r'setsync|'
                r'settime|'
                r'settitle|'
                r'show|'
                r'showtt|'
                r'sprintf|'
                r'sprintf2|'
                r'statusbox|'
                r'str2code|'
                r'str2int|'
                r'strcompare|'
                r'strconcat|'
                r'strcopy|'
                r'strdim|'
                r'strinsert|'
                r'strjoin|'
                r'strlen|'
                r'strmatch|'
                r'strremove|'
                r'strreplace|'
                r'strscan|'
                r'strspecial|'
                r'strsplit|'
                r'strtrim|'
                r'testlink|'
                r'then|'
                r'tolower|'
                r'toupper|'
                r'unlink|'
                r'until|'
                r'uptime|'
                r'var2clipb|'
                r'wait|'
                r'wait4all|'
                r'waitevent|'
                r'waitln|'
                r'waitn|'
                r'waitrecv|'
                r'waitregex|'
                r'while|'
                r'xmodemrecv|'
                r'xmodemsend|'
                r'yesnobox|'
                r'ymodemrecv|'
                r'ymodemsend|'
                r'zmodemrecv|'
                r'zmodemsend'
                r')\b',
                Keyword,
            ),
            # 'call'/'goto' followed by a label name.
            (r'(?i)(call|goto)([ \t]+)([a-z0-9_]+)',
             bygroups(Keyword, Text.Whitespace, Name.Label)),
        ],
        'builtin-variables': [
            (
                r'(?i)('
                r'groupmatchstr1|'
                r'groupmatchstr2|'
                r'groupmatchstr3|'
                r'groupmatchstr4|'
                r'groupmatchstr5|'
                r'groupmatchstr6|'
                r'groupmatchstr7|'
                r'groupmatchstr8|'
                r'groupmatchstr9|'
                r'inputstr|'
                r'matchstr|'
                r'mtimeout|'
                r'param1|'
                r'param2|'
                r'param3|'
                r'param4|'
                r'param5|'
                r'param6|'
                r'param7|'
                r'param8|'
                r'param9|'
                r'paramcnt|'
                r'params|'
                r'result|'
                r'timeout'
                r')\b',
                Name.Builtin
            ),
        ],
        'user-variables': [
            (r'(?i)[a-z_][a-z0-9_]*', Name.Variable),
        ],
        'numeric-literals': [
            # A leading '-' is lexed as an operator token.
            (r'(-?)([0-9]+)', bygroups(Operator, Number.Integer)),
            # '$' prefixes hexadecimal literals.
            (r'(?i)\$[0-9a-f]+', Number.Hex),
        ],
        'string-literals': [
            # '#nnn' / '#$hh' character codes.
            (r'(?i)#(?:[0-9]+|\$[0-9a-f]+)', String.Char),
            (r"'[^'\n]*'", String.Single),
            (r'"[^"\n]*"', String.Double),
            # Opening quotes without a closing quote on the same line are errors.
            (r"('[^']*)(\n)", bygroups(Error, Text.Whitespace)),
            (r'("[^"]*)(\n)', bygroups(Error, Text.Whitespace)),
        ],
        'operators': [
            (r'and|not|or|xor', Operator.Word),
            (r'[!%&*+<=>^~\|\/-]+', Operator),
            (r'[()]', String.Symbol),
        ],
        'all-whitespace': [
            (r'\s+', Text.Whitespace),
        ],
    }

    # Turtle and Tera Term macro files share the same file extension
    # but each has a recognizable and distinct syntax.
    def analyse_text(text):
        # Tiny positive score when any macro command keyword appears;
        # implicitly returns None otherwise.
        if re.search(TeraTermLexer.tokens['commands'][0][0], text):
            return 0.01
diff --git a/pygments/lexers/testing.py b/pygments/lexers/testing.py
new file mode 100644
index 0000000..e45fe52
--- /dev/null
+++ b/pygments/lexers/testing.py
@@ -0,0 +1,210 @@
+"""
+ pygments.lexers.testing
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for testing languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Comment, Keyword, Name, String, Number, Generic, Text
+
+__all__ = ['GherkinLexer', 'TAPLexer']
+
+
class GherkinLexer(RegexLexer):
    """
    For Gherkin syntax.

    .. versionadded:: 1.2
    """
    name = 'Gherkin'
    aliases = ['gherkin', 'cucumber']
    filenames = ['*.feature']
    mimetypes = ['text/x-gherkin']

    # Localized keyword alternations for every language Gherkin supports;
    # each anchors at line start and captures the keyword, the colon and the
    # rest of the line.
    feature_keywords = '^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
    feature_element_keywords = '^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|剧本大纲|剧本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
    examples_keywords = '^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
    step_keywords = '^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假设|假如|假定|但是|但し|並且|并且|同時|同时|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\\* )'

    tokens = {
        'comments': [
            (r'^\s*#.*$', Comment),
        ],
        # Body of a Scenario/Background section: steps and free text.
        'feature_elements': [
            (step_keywords, Keyword, "step_content_stack"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        # Same as above, but entered from the narrative state (one level
        # deeper on the stack, hence the '#pop:2').
        'feature_elements_on_stack': [
            (step_keywords, Keyword, "#pop:2"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table': [
            (r"\s+\|", Keyword, 'examples_table_header'),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table_header': [
            (r"\s+\|\s*$", Keyword, "#pop:2"),
            include('comments'),
            # '\|' is an escaped pipe inside a cell.
            (r"\\\|", Name.Variable),
            (r"\s*\|", Keyword),
            (r"[^|]", Name.Variable),
        ],
        'scenario_sections_on_stack': [
            (feature_element_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             "feature_elements_on_stack"),
        ],
        # Free-form description under a Feature: heading.
        'narrative': [
            include('scenario_sections_on_stack'),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        # '<placeholder>' variables used with Scenario Outline tables.
        'table_vars': [
            (r'(<[^>]+>)', Name.Variable),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String),
        ],
        'string': [
            include('table_vars'),
            (r'(\s|.)', String),
        ],
        # Triple-quoted doc string attached to a step.
        'py_string': [
            (r'"""', Keyword, "#pop"),
            include('string'),
        ],
        'step_content_root': [
            (r"$", Keyword, "#pop"),
            include('step_content'),
        ],
        'step_content_stack': [
            (r"$", Keyword, "#pop:2"),
            include('step_content'),
        ],
        'step_content': [
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            include('comments'),
            (r'(\s|.)', Name.Function),
        ],
        'table_content': [
            (r"\s+\|\s*$", Keyword, "#pop"),
            include('comments'),
            (r"\\\|", String),
            (r"\s*\|", Keyword),
            include('string'),
        ],
        'double_string': [
            (r'"', Name.Function, "#pop"),
            include('string'),
        ],
        'root': [
            (r'\n', Name.Function),
            include('comments'),
            (r'"""', Keyword, "py_string"),
            (r'\s+\|', Keyword, 'table_content'),
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            # '@tag' annotations.
            (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)),
            (step_keywords, bygroups(Name.Function, Keyword),
             'step_content_root'),
            (feature_keywords, bygroups(Keyword, Keyword, Name.Function),
             'narrative'),
            (feature_element_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             'feature_elements'),
            (examples_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             'examples_table'),
            (r'(\s|.)', Name.Function),
        ]
    }

    # NOTE(review): unlike most lexers this takes self and always returns
    # None, i.e. it deliberately opts out of content-based detection.
    def analyse_text(self, text):
        return
+
+
class TAPLexer(RegexLexer):
    """
    For Test Anything Protocol (TAP) output.

    .. versionadded:: 2.1
    """
    name = 'TAP'
    url = 'https://testanything.org/'
    aliases = ['tap']
    filenames = ['*.tap']

    tokens = {
        'root': [
            # A TAP version may be specified.
            (r'^TAP version \d+\n', Name.Namespace),

            # Specify a plan with a plan line.
            (r'^1\.\.\d+', Keyword.Declaration, 'plan'),

            # A test failure
            (r'^(not ok)([^\S\n]*)(\d*)',
             bygroups(Generic.Error, Text, Number.Integer), 'test'),

            # A test success
            (r'^(ok)([^\S\n]*)(\d*)',
             bygroups(Keyword.Reserved, Text, Number.Integer), 'test'),

            # Diagnostics start with a hash.
            (r'^#.*\n', Comment),

            # TAP's version of an abort statement.
            (r'^Bail out!.*\n', Generic.Error),

            # TAP ignores any unrecognized lines.
            (r'^.*\n', Text),
        ],
        # Rest of a '1..N' plan line.
        'plan': [
            # Consume whitespace (but not newline).
            (r'[^\S\n]+', Text),

            # A plan may have a directive with it.
            (r'#', Comment, 'directive'),

            # Or it could just end.
            (r'\n', Comment, '#pop'),

            # Anything else is wrong.
            (r'.*\n', Generic.Error, '#pop'),
        ],
        # Rest of an 'ok'/'not ok' test line (description, directive).
        'test': [
            # Consume whitespace (but not newline).
            (r'[^\S\n]+', Text),

            # A test may have a directive with it.
            (r'#', Comment, 'directive'),

            (r'\S+', Text),

            (r'\n', Text, '#pop'),
        ],
        # '# TODO'/'# SKIP' directive after a test or plan; the trailing
        # '#pop:2' returns all the way to 'root'.
        'directive': [
            # Consume whitespace (but not newline).
            (r'[^\S\n]+', Comment),

            # Extract todo items.
            (r'(?i)\bTODO\b', Comment.Preproc),

            # Extract skip items.
            (r'(?i)\bSKIP\S*', Comment.Preproc),

            (r'\S+', Comment),

            (r'\n', Comment, '#pop:2'),
        ],
    }
diff --git a/pygments/lexers/text.py b/pygments/lexers/text.py
new file mode 100644
index 0000000..d9bf03d
--- /dev/null
+++ b/pygments/lexers/text.py
@@ -0,0 +1,26 @@
+"""
+ pygments.lexers.text
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for non-source code file types.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.configs import ApacheConfLexer, NginxConfLexer, \
+ SquidConfLexer, LighttpdConfLexer, IniLexer, RegeditLexer, PropertiesLexer, \
+ UnixConfigLexer
+from pygments.lexers.console import PyPyLogLexer
+from pygments.lexers.textedit import VimLexer
+from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \
+ TexLexer, GroffLexer
+from pygments.lexers.installers import DebianControlLexer, SourcesListLexer
+from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer
+from pygments.lexers.haxe import HxmlLexer
+from pygments.lexers.sgf import SmartGameFormatLexer
+from pygments.lexers.diff import DiffLexer, DarcsPatchLexer
+from pygments.lexers.data import YamlLexer
+from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer
+
+__all__ = []
diff --git a/pygments/lexers/textedit.py b/pygments/lexers/textedit.py
new file mode 100644
index 0000000..14884bb
--- /dev/null
+++ b/pygments/lexers/textedit.py
@@ -0,0 +1,202 @@
+"""
+ pygments.lexers.textedit
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for languages related to text processing.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from bisect import bisect
+
+from pygments.lexer import RegexLexer, bygroups, default, include, this, using
+from pygments.lexers.python import PythonLexer
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ['AwkLexer', 'SedLexer', 'VimLexer']
+
+
class AwkLexer(RegexLexer):
    """
    For Awk scripts.

    .. versionadded:: 1.5
    """

    name = 'Awk'
    aliases = ['awk', 'gawk', 'mawk', 'nawk']
    filenames = ['*.awk']
    mimetypes = ['application/x-awk']

    tokens = {
        # Shared helper state: inter-token whitespace and '#' line comments.
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single)
        ],
        # Entered after tokens that may legally be followed by a regex
        # literal, so that '/' is read as a regex rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'\B', String.Regex, '#pop'),
            # A '/' that does not form a complete regex literal: hand the
            # rest of the line to the error-recovery state below.
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            # At the start of a line, '/' always begins a regex.
            (r'^(?=\s|/)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Operators; a '/' following an operator starts a regex.
            (r'\+\+|--|\|\||&&|in\b|\$|!?~|'
             r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(break|continue|do|while|exit|for|if|else|'
             r'return)\b', Keyword, 'slashstartsregex'),
            (r'function\b', Keyword.Declaration, 'slashstartsregex'),
            # Built-in functions and statements.
            (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
             r'length|match|split|sprintf|sub|substr|tolower|toupper|close|'
             r'fflush|getline|next|nextfile|print|printf|strftime|systime|'
             r'delete|system)\b', Keyword.Reserved),
            # Built-in variables.  NOTE(review): 'ORFS' looks like a typo for
            # awk's 'ORS' (output record separator) — confirm against the
            # gawk manual before changing.
            (r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|'
             r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|'
             r'RSTART|RT|SUBSEP)\b', Name.Builtin),
            # '$0', '$NF', ordinary identifiers.
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]
    }
+
+
class SedLexer(RegexLexer):
    """
    Lexer for Sed script files.
    """
    name = 'Sed'
    aliases = ['sed', 'gsed', 'ssed']
    filenames = ['*.sed', '*.[gs]sed']
    mimetypes = ['text/x-sed']
    flags = re.MULTILINE

    # Match the contents within delimiters such as /<contents>/
    # (allows backslash escapes, including escaped-newline continuations).
    _inside_delims = r'((?:(?:\\[^\n]|[^\\])*?\\\n)*?(?:\\.|[^\\])*?)'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*$', Comment.Single),
            # Numeric line addresses.
            (r'[0-9]+', Number.Integer),
            # '$' = last-line address.
            (r'\$', Operator),
            (r'[{};,!]', Punctuation),
            # Single-letter commands that take no argument.
            (r'[dDFgGhHlnNpPqQxz=]', Keyword),
            # Commands taking a label / filename argument to end of line.
            (r'([berRtTvwW:])([^;\n]*)', bygroups(Keyword, String.Single)),
            # a/c/i commands: text argument, possibly backslash-continued.
            (r'([aci])((?:.*?\\\n)*(?:.*?[^\\]$))', bygroups(Keyword, String.Double)),
            # q/Q with an exit code.  NOTE(review): 'q'/'Q' are already
            # consumed by the single-letter rule above, so this rule looks
            # unreachable — confirm intended rule order.
            (r'([qQ])([0-9]*)', bygroups(Keyword, Number.Integer)),
            # /regex/ address.
            (r'(/)' + _inside_delims + r'(/)', bygroups(Punctuation, String.Regex, Punctuation)),
            # \<delim>regex<delim> address with a custom delimiter
            # (group 2 captures the delimiter; \2 matches its closer).
            (r'(\\(.))' + _inside_delims + r'(\2)',
             bygroups(Punctuation, None, String.Regex, Punctuation)),
            # y/source/dest/ transliteration.
            (r'(y)(.)' + _inside_delims + r'(\2)' + _inside_delims + r'(\2)',
             bygroups(Keyword, Punctuation, String.Single, Punctuation, String.Single, Punctuation)),
            # s/pattern/replacement/flags substitution.
            (r'(s)(.)' + _inside_delims + r'(\2)' + _inside_delims + r'(\2)((?:[gpeIiMm]|[0-9])*)',
             bygroups(Keyword, Punctuation, String.Regex, Punctuation, String.Single, Punctuation,
                      Keyword))
        ]
    }
+
class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.

    .. versionadded:: 0.8
    """
    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
                 '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
    mimetypes = ['text/x-vim']
    flags = re.MULTILINE

    # ':python' may be abbreviated all the way down to ':py'.
    _python = r'py(?:t(?:h(?:o(?:n)?)?)?)?'

    tokens = {
        'root': [
            # Embedded Python, heredoc form: ':py << MARKER ... MARKER'.
            # Group 6 captures the marker so the backreference \6 can
            # match the terminating line.
            (r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)',
             bygroups(using(this), Keyword, Text, Operator, Text, Text,
                      using(PythonLexer), Text)),
            # Embedded Python, single-line form: ':py <statement>'.
            (r'^([ \t:]*)(' + _python + r')([ \t])(.*)',
             bygroups(using(this), Keyword, Text, using(PythonLexer))),

            # Full-line '"' comment.
            (r'^\s*".*', Comment),

            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/[^/\\\n]*(?:\\[\s\S][^/\\\n]*)*/', String.Regex),
            (r'"[^"\\\n]*(?:\\[\s\S][^"\\\n]*)*"', String.Double),
            (r"'[^\n']*(?:''[^\n']*)*'", String.Single),

            # Who decided that doublequote was a good comment character??
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation),  # Inexact list.  Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other),  # These are postprocessed below
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        # Sorted (abbreviation, full-name) tables generated by
        # scripts/get_vimkw.py; imported here rather than at module
        # level to keep module import cheap.
        from pygments.lexers._vim_builtins import auto, command, option
        self._cmd = command
        self._opt = option
        self._aut = auto

        RegexLexer.__init__(self, **options)

    def is_in(self, w, mapping):
        r"""
        Return True if ``w`` is a valid (possibly abbreviated) spelling of
        an entry in ``mapping``.

        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them.  In fact,
        'ab[breviate]' is a good example.  :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::

            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b

        we match `\b\w+\b` and then call is_in() on those tokens.  See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        # Locate w's insertion point in the sorted table, then check the
        # neighbouring entries: w matches when it starts with the entry's
        # minimal abbreviation and is itself a prefix of the full name.
        p = bisect(mapping, (w,))
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
                    mapping[p-1][1][:len(w)] == w:
                return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                mapping[p][1][:len(w)] == w
        return False

    def get_tokens_unprocessed(self, text):
        """Post-process Name.Other tokens into commands, builtins or plain
        text using the tables loaded in ``__init__``."""
        # TODO: builtins are only subsequent tokens on lines
        # and 'keywords' only happen at the beginning except
        # for :au ones
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                        self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value
diff --git a/pygments/lexers/textfmts.py b/pygments/lexers/textfmts.py
new file mode 100644
index 0000000..b6f635c
--- /dev/null
+++ b/pygments/lexers/textfmts.py
@@ -0,0 +1,431 @@
+"""
+ pygments.lexers.textfmts
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various text formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexers import guess_lexer, get_lexer_by_name
+from pygments.lexer import RegexLexer, bygroups, default, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Generic, Literal, Punctuation
+from pygments.util import ClassNotFound
+
+__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer',
+ 'NotmuchLexer', 'KernelLogLexer']
+
+
class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    flags = re.VERBOSE | re.MULTILINE
    # Optional timestamp prefix shared by all line rules below.  Because
    # of re.VERBOSE, unescaped whitespace inside the pattern is ignored
    # and '#' starts an in-pattern comment.
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/])*  # Date as - or /-separated groups of digits
                    (?:\d{1,4})
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.])*       # Time as :/.-separated groups of 1 or 2 digits
                    (?: \d?\d)
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """
    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                (\S+\s+.*?\n)          # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                (\S+\s+)                     # Nick + Space
                (.*?\n)                      # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            # Anything else: plain text line.
            (r"^.*?\n", Text),
        ],
        'msg': [
            # "nick:" at the start of a message addresses someone — unless
            # it is a URL scheme like "http://", excluded by the lookahead.
            (r"\S+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }
+
+
class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    .. versionadded:: 0.9
    """
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            # '#,' flag comments (e.g. fuzzy, c-format).
            (r'^#,\s.*?$', Keyword.Type),
            # '#:' source-reference comments.
            (r'^#:\s.*?$', Keyword.Declaration),
            # (r'^#$', Comment),
            # Remaining '#' comment forms: translator, extracted ('#.'),
            # previous ('#|'), obsolete ('#~') and plain comments.
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            # Header fields inside the msgstr "" block, e.g. "Language: fr\n".
            # Must precede the generic string rule below.
            (r'^(")([A-Za-z-]+:)(.*")$',
             bygroups(String, Name.Property, String)),
            # Continuation lines of a multi-line string.
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr|msgctxt)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            # Plural forms: msgstr[0] "...", msgstr[1] "...", etc.
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }
+
+
class HttpLexer(RegexLexer):
    """
    Lexer for HTTP sessions.

    .. versionadded:: 1.5
    """

    name = 'HTTP'
    aliases = ['http']

    flags = re.DOTALL

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Reset the content-type state."""
        # A Content-Type header seen in 'headers' selects the lexer used
        # for the body; clear it so one run cannot leak into the next.
        self.content_type = None
        return RegexLexer.get_tokens_unprocessed(self, text, stack)

    def header_callback(self, match):
        # Tokenize one "Name: value" header line; remember the
        # Content-Type value (sans parameters like '; charset=...')
        # for content_callback.
        if match.group(1).lower() == 'content-type':
            content_type = match.group(5).strip()
            if ';' in content_type:
                content_type = content_type[:content_type.find(';')].strip()
            self.content_type = content_type
        yield match.start(1), Name.Attribute, match.group(1)  # header name
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator, match.group(3)        # ':'
        yield match.start(4), Text, match.group(4)
        yield match.start(5), Literal, match.group(5)         # header value
        yield match.start(6), Text, match.group(6)            # line ending

    def continuous_header_callback(self, match):
        # A folded (continuation) header line: leading whitespace + value.
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Literal, match.group(2)
        yield match.start(3), Text, match.group(3)

    def content_callback(self, match):
        # Highlight the message body with a lexer chosen from the stored
        # Content-Type, falling back to plain text when none matches.
        content_type = getattr(self, 'content_type', None)
        content = match.group()
        offset = match.start()
        if content_type:
            from pygments.lexers import get_lexer_for_mimetype
            possible_lexer_mimetypes = [content_type]
            if '+' in content_type:
                # application/calendar+xml can be treated as application/xml
                # if there's not a better match.
                general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
                                      content_type)
                possible_lexer_mimetypes.append(general_type)

            for i in possible_lexer_mimetypes:
                try:
                    lexer = get_lexer_for_mimetype(i)
                except ClassNotFound:
                    pass
                else:
                    # Re-offset the sub-lexer's tokens into this stream.
                    for idx, token, value in lexer.get_tokens_unprocessed(content):
                        yield offset + idx, token, value
                    return
        yield offset, Text, content

    tokens = {
        'root': [
            # Request line: METHOD SP target SP HTTP/version
            (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH|CONNECT)( +)([^ ]+)( +)'
             r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)(\r?\n|\Z)',
             bygroups(Name.Function, Text, Name.Namespace, Text,
                      Keyword.Reserved, Operator, Number, Text),
             'headers'),
            # Status line: HTTP/version SP code [SP reason]
            (r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)( +)(\d{3})(?:( +)([^\r\n]*))?(\r?\n|\Z)',
             bygroups(Keyword.Reserved, Operator, Number, Text, Number, Text,
                      Name.Exception, Text),
             'headers'),
        ],
        'headers': [
            (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback),
            (r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback),
            # Blank line terminates the headers; everything after is body.
            (r'\r?\n', Text, 'content')
        ],
        'content': [
            (r'.+', content_callback)
        ]
    }

    def analyse_text(text):
        # A leading request line is a strong signal for an HTTP session.
        return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /',
                                'OPTIONS /', 'TRACE /', 'PATCH /', 'CONNECT '))
+
+
class TodotxtLexer(RegexLexer):
    """
    Lexer for Todo.txt todo list format.

    Each input line is one task; complete tasks (leading ``x``) and
    incomplete tasks are highlighted differently.

    .. versionadded:: 2.0
    """

    name = 'Todotxt'
    url = 'http://todotxt.com/'
    aliases = ['todotxt']
    # *.todotxt is not a standard extension for Todo.txt files; including it
    # makes testing easier, and also makes autodetecting file type easier.
    filenames = ['todo.txt', '*.todotxt']
    mimetypes = ['text/x-todo']

    # Aliases mapping standard token types of Todo.txt format concepts
    CompleteTaskText = Operator  # Chosen to de-emphasize complete tasks
    IncompleteTaskText = Text    # Incomplete tasks should look like plain text

    # Priority should have most emphasis to indicate importance of tasks
    Priority = Generic.Heading
    # Dates should have next most emphasis because time is important
    Date = Generic.Subheading

    # Project and context should have equal weight, and be in different colors
    Project = Generic.Error
    Context = String

    # If tag functionality is added, it should have the same weight as Project
    # and Context, and a different color. Generic.Traceback would work well.

    # Regex patterns for building up rules; dates, priorities, projects, and
    # contexts are all atomic
    # TODO: Make date regex more ISO 8601 compliant
    date_regex = r'\d{4,}-\d{2}-\d{2}'
    priority_regex = r'\([A-Z]\)'
    project_regex = r'\+\S+'
    context_regex = r'@\S+'

    # Compound regex expressions
    complete_one_date_regex = r'(x )(' + date_regex + r')'
    complete_two_date_regex = (complete_one_date_regex + r'( )(' +
                               date_regex + r')')
    priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'

    tokens = {
        # Should parse starting at beginning of line; each line is a task
        'root': [
            # Complete task entry points: two total:
            # 1. Complete task with two dates
            (complete_two_date_regex, bygroups(CompleteTaskText, Date,
                                               CompleteTaskText, Date),
             'complete'),
            # 2. Complete task with one date
            (complete_one_date_regex, bygroups(CompleteTaskText, Date),
             'complete'),

            # Incomplete task entry points: six total:
            # 1. Priority plus date
            (priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
             'incomplete'),
            # 2. Priority only
            (priority_regex, Priority, 'incomplete'),
            # 3. Leading date
            (date_regex, Date, 'incomplete'),
            # 4. Leading context
            (context_regex, Context, 'incomplete'),
            # 5. Leading project
            (project_regex, Project, 'incomplete'),
            # 6. Non-whitespace catch-all
            (r'\S+', IncompleteTaskText, 'incomplete'),
        ],

        # Parse a complete task
        'complete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', CompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', CompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', CompleteTaskText),
        ],

        # Parse an incomplete task
        'incomplete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', IncompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', IncompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', IncompleteTaskText),
        ],
    }
+
+
class NotmuchLexer(RegexLexer):
    """
    For Notmuch email text format.

    .. versionadded:: 2.5

    Additional options accepted:

    `body_lexer`
        If given, highlight the contents of the message body with the specified
        lexer, else guess it according to the body content (default: ``None``).
    """

    name = 'Notmuch'
    url = 'https://notmuchmail.org/'
    aliases = ['notmuch']

    def _highlight_code(self, match):
        # Delegate the message body to `body_lexer` if configured,
        # otherwise let Pygments guess; fall back to plain text.
        code = match.group(1)

        try:
            if self.body_lexer:
                lexer = get_lexer_by_name(self.body_lexer)
            else:
                lexer = guess_lexer(code.strip())
        except ClassNotFound:
            lexer = get_lexer_by_name('text')

        yield from lexer.get_tokens_unprocessed(code)

    # Notmuch output uses form-feed (\f) delimited structural markers:
    # message{ ... header{ ... } body{ ... part{ ... } } }
    tokens = {
        'root': [
            (r'\fmessage\{\s*', Keyword, ('message', 'message-attr')),
        ],
        # Attributes on the same line as 'message{'.
        'message-attr': [
            (r'(\s*id:\s*)(\S+)', bygroups(Name.Attribute, String)),
            (r'(\s*(?:depth|match|excluded):\s*)(\d+)',
             bygroups(Name.Attribute, Number.Integer)),
            (r'(\s*filename:\s*)(.+\n)',
             bygroups(Name.Attribute, String)),
            default('#pop'),
        ],
        'message': [
            (r'\fmessage\}\n', Keyword, '#pop'),
            (r'\fheader\{\n', Keyword, 'header'),
            (r'\fbody\{\n', Keyword, 'body'),
        ],
        'header': [
            (r'\fheader\}\n', Keyword, '#pop'),
            (r'((?:Subject|From|To|Cc|Date):\s*)(.*\n)',
             bygroups(Name.Attribute, String)),
            # Summary line: subject (sender) (tags).
            (r'(.*)(\s*\(.*\))(\s*\(.*\)\n)',
             bygroups(Generic.Strong, Literal, Name.Tag)),
        ],
        'body': [
            (r'\fpart\{\n', Keyword, 'part'),
            (r'\f(part|attachment)\{\s*', Keyword, ('part', 'part-attr')),
            (r'\fbody\}\n', Keyword, '#pop'),
        ],
        # Attributes on the same line as 'part{' / 'attachment{'.
        'part-attr': [
            (r'(ID:\s*)(\d+)', bygroups(Name.Attribute, Number.Integer)),
            (r'(,\s*)((?:Filename|Content-id):\s*)([^,]+)',
             bygroups(Punctuation, Name.Attribute, String)),
            (r'(,\s*)(Content-type:\s*)(.+\n)',
             bygroups(Punctuation, Name.Attribute, String)),
            default('#pop'),
        ],
        'part': [
            (r'\f(?:part|attachment)\}\n', Keyword, '#pop'),
            # Parts nest: push another 'part' level.
            (r'\f(?:part|attachment)\{\s*', Keyword, ('#push', 'part-attr')),
            (r'^Non-text part: .*\n', Comment),
            # Everything up to the closing marker is the part's content.
            (r'(?s)(.*?(?=\f(?:part|attachment)\}\n))', _highlight_code),
        ],
    }

    def analyse_text(text):
        # The \f-prefixed 'message{' marker is unique to notmuch output.
        return 1.0 if text.startswith('\fmessage{') else 0.0

    def __init__(self, **options):
        # Optional lexer name used for message bodies (see class docstring).
        self.body_lexer = options.get('body_lexer', None)
        RegexLexer.__init__(self, **options)
+
+
class KernelLogLexer(RegexLexer):
    """
    For Linux Kernel log ("dmesg") output.

    .. versionadded:: 2.6
    """
    name = 'Kernel log'
    aliases = ['kmsg', 'dmesg']
    filenames = ['*.kmsg', '*.dmesg']

    tokens = {
        'root': [
            # Lines carrying an explicit syslog facility/level prefix,
            # e.g. "kern  :info  : [  12.345678] ...".
            (r'^[^:]+:debug : (?=\[)', Text, 'debug'),
            (r'^[^:]+:info  : (?=\[)', Text, 'info'),
            (r'^[^:]+:warn  : (?=\[)', Text, 'warn'),
            (r'^[^:]+:notice: (?=\[)', Text, 'warn'),
            (r'^[^:]+:err   : (?=\[)', Text, 'error'),
            (r'^[^:]+:crit  : (?=\[)', Text, 'error'),
            # Bare "[timestamp] ..." line without a level prefix.
            # NOTE(review): 'unknown' is pushed but never popped, so the
            # lexer stays below 'root' for the rest of the input — confirm
            # whether that is intentional.
            (r'^(?=\[)', Text, 'unknown'),
        ],
        # Guess a severity from keywords appearing in the line.
        'unknown': [
            (r'^(?=.+(warning|notice|audit|deprecated))', Text, 'warn'),
            (r'^(?=.+(error|critical|fail|Bug))', Text, 'error'),
            default('info'),
        ],
        # Shared line structure: "[timestamp] subsystem: message".
        'base': [
            (r'\[[0-9. ]+\] ', Number),
            (r'(?<=\] ).+?:', Keyword),
            (r'\n', Text, '#pop'),
        ],
        'debug': [
            include('base'),
            (r'.+\n', Comment, '#pop')
        ],
        'info': [
            include('base'),
            (r'.+\n', Text, '#pop')
        ],
        'warn': [
            include('base'),
            (r'.+\n', Generic.Strong, '#pop')
        ],
        'error': [
            include('base'),
            (r'.+\n', Generic.Error, '#pop')
        ]
    }
diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py
new file mode 100644
index 0000000..2fa6900
--- /dev/null
+++ b/pygments/lexers/theorem.py
@@ -0,0 +1,484 @@
+"""
+ pygments.lexers.theorem
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for theorem-proving languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Whitespace
+
+__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer']
+
+
class CoqLexer(RegexLexer):
    """
    For the Coq theorem prover.

    .. versionadded:: 1.5
    """

    name = 'Coq'
    url = 'http://coq.inria.fr/'
    aliases = ['coq']
    filenames = ['*.v']
    mimetypes = ['text/x-coq']

    flags = 0  # no re.MULTILINE

    keywords1 = (
        # Vernacular commands
        'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
        'Variables', 'Parameter', 'Parameters', 'Axiom', 'Axioms', 'Hypothesis',
        'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
        'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Example', 'Let',
        'Ltac', 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
        'Arguments', 'Types', 'Unset', 'Contextual', 'Strict', 'Prenex',
        'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
        'Variant', 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Fact',
        'Remark', 'Corollary', 'Proposition', 'Property', 'Goal',
        'Proof', 'Restart', 'Save', 'Qed', 'Defined', 'Abort', 'Admitted',
        'Hint', 'Resolve', 'Rewrite', 'View', 'Search', 'Compute', 'Eval',
        'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
        'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
        'Universe', 'Polymorphic', 'Monomorphic', 'Context', 'Scheme', 'From',
        'Undo', 'Fail', 'Function',
    )
    keywords2 = (
        # Gallina
        'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
        'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
        'for', 'of', 'nosimpl', 'with', 'as',
    )
    keywords3 = (
        # Sorts
        'Type', 'Prop', 'SProp', 'Set',
    )
    keywords4 = (
        # Tactics
        'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
        'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
        'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
        'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
        'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
        'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
        'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
        'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
        'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
        'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
        'native_compute', 'subst',
    )
    keywords5 = (
        # Terminators
        'by', 'now', 'done', 'exact', 'reflexivity',
        'tauto', 'romega', 'omega', 'lia', 'nia', 'lra', 'nra', 'psatz',
        'assumption', 'solve', 'contradiction', 'discriminate',
        'congruence', 'admit'
    )
    keywords6 = (
        # Control
        'do', 'last', 'first', 'try', 'idtac', 'repeat',
    )
    # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
    # 'downto', 'else', 'end', 'exception', 'external', 'false',
    # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
    # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
    # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
    # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
    # 'type', 'val', 'virtual', 'when', 'while', 'with'
    keyopts = (
        '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.',
        '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
        '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
        r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
        r'/\\', r'\\/', r'\{\|', r'\|\}',
        # 'Π', 'Σ', # Not defined in the standard library
        'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥',
    )
    operators = r'[!$%&*+\./:<=>?@^|~-]'
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
            # '(*' opens a (nestable) comment.
            (r'\(\*', Comment, 'comment'),
            # Dotted (module-qualified) identifiers.
            (r'\b(?:[^\W\d][\w\']*\.)+[^\W\d][\w\']*\b', Name),
            (r'\bEquations\b\??', Keyword.Namespace),
            # Very weak heuristic to distinguish the Set vernacular from the Set sort
            (r'\bSet(?=[ \t]+[A-Z][a-z][^\n]*?\.)', Keyword.Namespace),
            (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
            (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
            # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            # Reversed so longer alternatives are tried before their prefixes.
            (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),

            (r"[^\W\d][\w']*", Name),

            # Numeric literals.  FIX: radix-prefixed and float forms must be
            # tried before the plain-integer rule; previously the integer
            # rule came first and consumed the leading digits, so '0x1F'
            # lexed as integer '0' + name 'x1F' and the hex/oct/bin/float
            # rules were unreachable.
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),
            # FIX: the decimal point is now escaped ('\.'); the original
            # unescaped '.' matched any character between mantissa and
            # exponent (e.g. '1x2e3' would have lexed as a float).
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
            (r'\d[\d_]*', Number.Integer),

            # Character literal with escape sequence.
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char),

            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            (r'"', String.Double, 'string'),

            # Labeled arguments such as '~x:' / '?x:'.
            (r'[~?][a-z][\w\']*:', Name),
            # Any other non-whitespace character.
            (r'\S', Name.Builtin.Pseudo),
        ],
        'comment': [
            # Comments nest: '(*' pushes another level, '*)' pops one.
            (r'[^(*)]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        'string': [
            (r'[^"]+', String.Double),
            # '""' is an escaped double quote inside a Coq string.
            (r'""', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name.Class, '#pop'),
            (r'[a-z][a-z0-9_\']*', Name, '#pop'),
            default('#pop')
        ],
    }

    def analyse_text(text):
        # 'Proof'/'Qed' together are a very strong signal for Coq sources.
        if 'Qed' in text and 'Proof' in text:
            return 1
+
class IsabelleLexer(RegexLexer):
    """
    For the Isabelle proof assistant.

    .. versionadded:: 2.0
    """

    name = 'Isabelle'
    url = 'https://isabelle.in.tum.de/'
    aliases = ['isabelle']
    filenames = ['*.thy']
    mimetypes = ['text/x-isabelle']

    # The keyword groups below follow Isabelle's own keyword classification
    # (minor keywords, diagnostic commands, theory/proof commands, ...).
    keyword_minor = (
        'and', 'assumes', 'attach', 'avoids', 'binder', 'checking',
        'class_instance', 'class_relation', 'code_module', 'congs',
        'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes',
        'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in',
        'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy',
        'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes',
        'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive',
        'pervasive', 'rep_compat', 'shows', 'structure', 'type_class',
        'type_constructor', 'unchecked', 'unsafe', 'where',
    )

    keyword_diag = (
        'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms',
        'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms',
        'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf',
        'print_abbrevs', 'print_antiquotations', 'print_attributes',
        'print_binds', 'print_bnfs', 'print_bundles',
        'print_case_translations', 'print_cases', 'print_claset',
        'print_classes', 'print_codeproc', 'print_codesetup',
        'print_coercions', 'print_commands', 'print_context',
        'print_defn_rules', 'print_dependencies', 'print_facts',
        'print_induct_rules', 'print_inductives', 'print_interps',
        'print_locale', 'print_locales', 'print_methods', 'print_options',
        'print_orders', 'print_quot_maps', 'print_quotconsts',
        'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3',
        'print_rules', 'print_simpset', 'print_state', 'print_statement',
        'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules',
        'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status',
        'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps',
        'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome',
        'print_ML_antiquotations', 'print_term_bindings', 'values_prolog',
    )

    keyword_thy = ('theory', 'begin', 'end')

    keyword_section = ('header', 'chapter')

    keyword_subsection = (
        'section', 'subsection', 'subsubsection', 'sect', 'subsect',
        'subsubsect',
    )

    keyword_theory_decl = (
        'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities',
        'atom_decl', 'attribute_setup', 'axiomatization', 'bundle',
        'case_of_simps', 'class', 'classes', 'classrel', 'codatatype',
        'code_abort', 'code_class', 'code_const', 'code_datatype',
        'code_identifier', 'code_include', 'code_instance', 'code_modulename',
        'code_monad', 'code_printing', 'code_reflect', 'code_reserved',
        'code_type', 'coinductive', 'coinductive_set', 'consts', 'context',
        'datatype', 'datatype_new', 'datatype_new_compat', 'declaration',
        'declare', 'default_sort', 'defer_recdef', 'definition', 'defs',
        'domain', 'domain_isomorphism', 'domaindef', 'equivariance',
        'export_code', 'extract', 'extract_type', 'fixrec', 'fun',
        'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type',
        'import_const_map', 'import_file', 'import_tptp', 'import_type_map',
        'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas',
        'lifting_forget', 'lifting_update', 'local_setup', 'locale',
        'method_setup', 'nitpick_params', 'no_adhoc_overloading',
        'no_notation', 'no_syntax', 'no_translations', 'no_type_notation',
        'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle',
        'overloading', 'parse_ast_translation', 'parse_translation',
        'partial_function', 'primcorec', 'primrec', 'primrec_new',
        'print_ast_translation', 'print_translation', 'quickcheck_generator',
        'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record',
        'refute_params', 'setup', 'setup_lifting', 'simproc_setup',
        'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open',
        'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions',
        'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text',
        'text_raw', 'theorems', 'translations', 'type_notation',
        'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate',
        'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types',
        'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate',
        'bnf_axiomatization', 'cartouche', 'datatype_compat',
        'free_constructors', 'functor', 'nominal_function',
        'nominal_termination', 'permanent_interpretation',
        'binds', 'defining', 'smt2_status', 'term_cartouche',
        'boogie_file', 'text_cartouche',
    )

    keyword_theory_script = ('inductive_cases', 'inductive_simps')

    keyword_theory_goal = (
        'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef',
        'crunch', 'crunch_ignore',
        'enriched_type', 'function', 'instance', 'interpretation', 'lemma',
        'lift_definition', 'nominal_inductive', 'nominal_inductive2',
        'nominal_primrec', 'pcpodef', 'primcorecursive',
        'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype',
        'schematic_corollary', 'schematic_lemma', 'schematic_theorem',
        'spark_vc', 'specification', 'subclass', 'sublocale', 'termination',
        'theorem', 'typedef', 'wrap_free_constructors',
    )

    keyword_qed = ('by', 'done', 'qed')
    keyword_abandon_proof = ('sorry', 'oops')

    keyword_proof_goal = ('have', 'hence', 'interpret')

    keyword_proof_block = ('next', 'proof')

    keyword_proof_chain = (
        'finally', 'from', 'then', 'ultimately', 'with',
    )

    keyword_proof_decl = (
        'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note',
        'txt', 'txt_raw', 'unfolding', 'using', 'write',
    )

    keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume')

    keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus')

    keyword_proof_script = (
        'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer',
    )

    operators = (
        '::', ':', '(', ')', '[', ']', '_', '=', ',', '|',
        '+', '-', '!', '?',
    )

    proof_operators = ('{', '}', '.', '..')

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # '(*' opens a (nestable) comment.
            (r'\(\*', Comment, 'comment'),
            # Cartouche text, either as '\<open>...\<close>' symbols or
            # literal '{* ... *}' / '‹ ... ›' delimiters.
            (r'\\<open>', String.Symbol, 'cartouche'),
            (r'\{\*|‹', String, 'cartouche'),

            (words(operators), Operator),
            (words(proof_operators), Operator.Word),

            (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),

            (words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type),

            (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword),

            (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading),
            (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading),

            (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),

            # 'sorry'/'oops' abandon a proof — rendered as an error.
            (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error),

            (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword),

            (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword),

            (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),

            # Isabelle symbol escapes such as '\<forall>' or '\<^sub>'.
            (r'\\<(\w|\^)*>', Text.Symbol),

            # Type variables: 'a, 'b, ...
            (r"'[^\W\d][.\w']*", Name.Type),

            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),

            (r'"', String, 'string'),
            # Backquoted facts.
            (r'`', String.Other, 'fact'),
            # Fallback identifier rule.
            (r'[^\s:|\[\]\-()=,+!?{}._][^\s:|\[\]\-()=,+!?{}]*', Name),
        ],
        'comment': [
            (r'[^(*)]+', Comment),
            # Comments nest.
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        'cartouche': [
            (r'[^{*}\\‹›]+', String),
            # Cartouches nest, in either delimiter style.
            (r'\\<open>', String.Symbol, '#push'),
            (r'\{\*|‹', String, '#push'),
            (r'\\<close>', String.Symbol, '#pop'),
            (r'\*\}|›', String, '#pop'),
            (r'\\<(\w|\^)*>', String.Symbol),
            (r'[{*}\\]', String),
        ],
        'string': [
            (r'[^"\\]+', String),
            (r'\\<(\w|\^)*>', String.Symbol),
            (r'\\"', String),
            (r'\\', String),
            (r'"', String, '#pop'),
        ],
        'fact': [
            (r'[^`\\]+', String.Other),
            (r'\\<(\w|\^)*>', String.Symbol),
            (r'\\`', String.Other),
            (r'\\', String.Other),
            (r'`', String.Other, '#pop'),
        ],
    }
+
+
+class LeanLexer(RegexLexer):
+ """
+ For the Lean theorem prover.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Lean'
+ url = 'https://github.com/leanprover/lean'
+ aliases = ['lean']
+ filenames = ['*.lean']
+ mimetypes = ['text/x-lean']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r'/--', String.Doc, 'docstring'),
+ (r'/-', Comment, 'comment'),
+ (r'--.*?$', Comment.Single),
+ (words((
+ 'import', 'renaming', 'hiding',
+ 'namespace',
+ 'local',
+ 'private', 'protected', 'section',
+ 'include', 'omit', 'section',
+ 'protected', 'export',
+ 'open',
+ 'attribute',
+ ), prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
+ (words((
+ 'lemma', 'theorem', 'def', 'definition', 'example',
+ 'axiom', 'axioms', 'constant', 'constants',
+ 'universe', 'universes',
+ 'inductive', 'coinductive', 'structure', 'extends',
+ 'class', 'instance',
+ 'abbreviation',
+
+ 'noncomputable theory',
+
+ 'noncomputable', 'mutual', 'meta',
+
+ 'attribute',
+
+ 'parameter', 'parameters',
+ 'variable', 'variables',
+
+ 'reserve', 'precedence',
+ 'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr',
+
+ 'begin', 'by', 'end',
+
+ 'set_option',
+ 'run_cmd',
+ ), prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
+ (r'@\[[^\]]*\]', Keyword.Declaration),
+ (words((
+ 'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices',
+ 'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match',
+ 'do'
+ ), prefix=r'\b', suffix=r'\b'), Keyword),
+ (words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error),
+ (words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
+ (words((
+ '#eval', '#check', '#reduce', '#exit',
+ '#print', '#help',
+ ), suffix=r'\b'), Keyword),
+ (words((
+ '(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',',
+ )), Operator),
+ (r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]'
+ r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079'
+ r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name),
+ (r'0x[A-Za-z0-9]+', Number.Integer),
+ (r'0b[01]+', Number.Integer),
+ (r'\d+', Number.Integer),
+ (r'"', String.Double, 'string'),
+ (r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char),
+ (r'[~?][a-z][\w\']*:', Name.Variable),
+ (r'\S', Name.Builtin.Pseudo),
+ ],
+ 'comment': [
+ (r'[^/-]', Comment.Multiline),
+ (r'/-', Comment.Multiline, '#push'),
+ (r'-/', Comment.Multiline, '#pop'),
+ (r'[/-]', Comment.Multiline)
+ ],
+ 'docstring': [
+ (r'[^/-]', String.Doc),
+ (r'-/', String.Doc, '#pop'),
+ (r'[/-]', String.Doc)
+ ],
+ 'string': [
+ (r'[^\\"]+', String.Double),
+ (r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape),
+ ('"', String.Double, '#pop'),
+ ],
+ }
diff --git a/pygments/lexers/thingsdb.py b/pygments/lexers/thingsdb.py
new file mode 100644
index 0000000..502b63c
--- /dev/null
+++ b/pygments/lexers/thingsdb.py
@@ -0,0 +1,116 @@
+"""
+ pygments.lexers.thingsdb
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the ThingsDB language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Comment, Keyword, Name, Number, String, Text, \
+ Operator, Punctuation, Whitespace
+
+__all__ = ['ThingsDBLexer']
+
+
+class ThingsDBLexer(RegexLexer):
+ """
+ Lexer for the ThingsDB programming language.
+
+ .. versionadded:: 2.9
+ """
+ name = 'ThingsDB'
+ aliases = ['ti', 'thingsdb']
+ filenames = ['*.ti']
+
+ tokens = {
+ 'root': [
+ include('expression'),
+ ],
+ 'expression': [
+ include('comments'),
+ include('whitespace'),
+
+ # numbers
+ (r'[-+]?0b[01]+', Number.Bin),
+ (r'[-+]?0o[0-8]+', Number.Oct),
+ (r'([-+]?0x[0-9a-fA-F]+)', Number.Hex),
+ (r'[-+]?[0-9]+', Number.Integer),
+ (r'[-+]?((inf|nan)([^0-9A-Za-z_]|$)|[0-9]*\.[0-9]+(e[+-][0-9]+)?)',
+ Number.Float),
+
+ # strings
+ (r'(?:"(?:[^"]*)")+', String.Double),
+ (r"(?:'(?:[^']*)')+", String.Single),
+
+ # literals
+ (r'(true|false|nil)\b', Keyword.Constant),
+
+ # regular expressions
+ (r'(/[^/\\]*(?:\\.[^/\\]*)*/i?)', String.Regex),
+
+ # thing id's
+ (r'#[0-9]+', Comment.Preproc),
+
+ # name, assignments and functions
+ include('names'),
+
+ (r'[(){}\[\],;]', Punctuation),
+ (r'[+\-*/%&|<>^!~@=:?]', Operator),
+ ],
+ 'names': [
+ (r'(\.)'
+ r'(add|call|contains|del|endswith|extend|filter|find|findindex|'
+ r'get|has|id|indexof|keys|len|lower|map|pop|push|remove|set|sort|'
+ r'splice|startswith|test|unwrap|upper|values|wrap)'
+ r'(\()',
+ bygroups(Name.Function, Name.Function, Punctuation), 'arguments'),
+ (r'(array|assert|assert_err|auth_err|backup_info|backups_info|'
+ r'bad_data_err|bool|closure|collection_info|collections_info|'
+ r'counters|deep|del_backup|del_collection|del_expired|del_node|'
+ r'del_procedure|del_token|del_type|del_user|err|float|'
+ r'forbidden_err|grant|int|isarray|isascii|isbool|isbytes|iserr|'
+ r'isfloat|isinf|isint|islist|isnan|isnil|israw|isset|isstr|'
+ r'isthing|istuple|isutf8|lookup_err|max_quota_err|mod_type|new|'
+ r'new_backup|new_collection|new_node|new_procedure|new_token|'
+ r'new_type|new_user|node_err|node_info|nodes_info|now|'
+ r'num_arguments_err|operation_err|overflow_err|procedure_doc|'
+ r'procedure_info|procedures_info|raise|refs|rename_collection|'
+ r'rename_user|reset_counters|return|revoke|run|set_log_level|set|'
+ r'set_quota|set_type|shutdown|str|syntax_err|thing|try|type|'
+ r'type_err|type_count|type_info|types_info|user_info|users_info|'
+ r'value_err|wse|zero_div_err)'
+ r'(\()',
+ bygroups(Name.Function, Punctuation),
+ 'arguments'),
+ (r'(\.[A-Za-z_][0-9A-Za-z_]*)'
+ r'(\s*)(=)',
+ bygroups(Name.Attribute, Text, Operator)),
+ (r'\.[A-Za-z_][0-9A-Za-z_]*', Name.Attribute),
+ (r'([A-Za-z_][0-9A-Za-z_]*)(\s*)(=)',
+ bygroups(Name.Variable, Text, Operator)),
+ (r'[A-Za-z_][0-9A-Za-z_]*', Name.Variable),
+ ],
+ 'whitespace': [
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ ],
+ 'comments': [
+ (r'//(.*?)\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'arguments': [
+ include('expression'),
+ (',', Punctuation),
+ (r'\(', Punctuation, '#push'),
+ (r'\)', Punctuation, '#pop'),
+ ],
+ }
diff --git a/pygments/lexers/tlb.py b/pygments/lexers/tlb.py
new file mode 100644
index 0000000..1990ec9
--- /dev/null
+++ b/pygments/lexers/tlb.py
@@ -0,0 +1,57 @@
+"""
+ pygments.lexers.tlb
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for TL-b.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Operator, Name, \
+ Number, Whitespace, Punctuation, Comment
+
+__all__ = ['TlbLexer']
+
+
+class TlbLexer(RegexLexer):
+ """
+ For TL-b source code.
+ """
+
+ name = 'Tl-b'
+ aliases = ['tlb']
+ filenames = ['*.tlb']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+
+ include('comments'),
+
+ (r'[0-9]+', Number),
+ (words((
+ '+', '-', '*', '=', '?', '~', '.',
+ '^', '==', '<', '>', '<=', '>=', '!='
+ )), Operator),
+ (words(('##', '#<', '#<=')), Name.Tag),
+ (r'#[0-9a-f]*_?', Name.Tag),
+ (r'\$[01]*_?', Name.Tag),
+
+ (r'[a-zA-Z_][0-9a-zA-Z_]*', Name),
+
+ (r'[;():\[\]{}]', Punctuation)
+ ],
+
+ 'comments': [
+ (r'//.*', Comment.Singleline),
+ (r'/\*', Comment.Multiline, 'comment'),
+ ],
+ 'comment': [
+ (r'[^/*]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ }
diff --git a/pygments/lexers/tnt.py b/pygments/lexers/tnt.py
new file mode 100644
index 0000000..e0e205d
--- /dev/null
+++ b/pygments/lexers/tnt.py
@@ -0,0 +1,271 @@
+"""
+ pygments.lexers.tnt
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Typographic Number Theory.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
+ Punctuation, Error
+
+__all__ = ['TNTLexer']
+
+
+class TNTLexer(Lexer):
+ """
+ Lexer for Typographic Number Theory, as described in the book
+ Gödel, Escher, Bach, by Douglas R. Hofstadter
+
+ .. versionadded:: 2.7
+ """
+
+ name = 'Typographic Number Theory'
+ url = 'https://github.com/Kenny2github/language-tnt'
+ aliases = ['tnt']
+ filenames = ['*.tnt']
+
+ cur = []
+
+ LOGIC = set('⊃→]&∧^|∨Vv')
+ OPERATORS = set('+.⋅*')
+ VARIABLES = set('abcde')
+ PRIMES = set("'′")
+ NEGATORS = set('~!')
+ QUANTIFIERS = set('AE∀∃')
+ NUMBERS = set('0123456789')
+ WHITESPACE = set('\t \v\n')
+
+ RULES = re.compile('''(?xi)
+ joining | separation | double-tilde | fantasy\\ rule
+ | carry[- ]over(?:\\ of)?(?:\\ line)?\\ ([0-9]+) | detachment
+ | contrapositive | De\\ Morgan | switcheroo
+ | specification | generalization | interchange
+ | existence | symmetry | transitivity
+ | add\\ S | drop\\ S | induction
+ | axiom\\ ([1-5]) | premise | push | pop
+ ''')
+ LINENOS = re.compile(r'(?:[0-9]+)(?:(?:, ?|,? and )(?:[0-9]+))*')
+ COMMENT = re.compile(r'\[[^\n\]]+\]')
+
+ def __init__(self, *args, **kwargs):
+ Lexer.__init__(self, *args, **kwargs)
+ self.cur = []
+
+ def whitespace(self, start, text, required=False):
+ """Tokenize whitespace."""
+ end = start
+ try:
+ while text[end] in self.WHITESPACE:
+ end += 1
+ except IndexError:
+ end = len(text)
+ if required and end == start:
+ raise AssertionError
+ if end != start:
+ self.cur.append((start, Text, text[start:end]))
+ return end
+
+ def variable(self, start, text):
+ """Tokenize a variable."""
+ if text[start] not in self.VARIABLES:
+ raise AssertionError
+ end = start+1
+ while text[end] in self.PRIMES:
+ end += 1
+ self.cur.append((start, Name.Variable, text[start:end]))
+ return end
+
+ def term(self, start, text):
+ """Tokenize a term."""
+ if text[start] == 'S': # S...S(...) or S...0
+ end = start+1
+ while text[end] == 'S':
+ end += 1
+ self.cur.append((start, Number.Integer, text[start:end]))
+ return self.term(end, text)
+ if text[start] == '0': # the singleton 0
+ self.cur.append((start, Number.Integer, text[start]))
+ return start+1
+ if text[start] in self.VARIABLES: # a''...
+ return self.variable(start, text)
+ if text[start] == '(': # (...+...)
+ self.cur.append((start, Punctuation, text[start]))
+ start = self.term(start+1, text)
+ if text[start] not in self.OPERATORS:
+ raise AssertionError
+ self.cur.append((start, Operator, text[start]))
+ start = self.term(start+1, text)
+ if text[start] != ')':
+ raise AssertionError
+ self.cur.append((start, Punctuation, text[start]))
+ return start+1
+ raise AssertionError # no matches
+
+ def formula(self, start, text):
+ """Tokenize a formula."""
+ if text[start] in self.NEGATORS: # ~<...>
+ end = start+1
+ while text[end] in self.NEGATORS:
+ end += 1
+ self.cur.append((start, Operator, text[start:end]))
+ return self.formula(end, text)
+ if text[start] in self.QUANTIFIERS: # Aa:<...>
+ self.cur.append((start, Keyword.Declaration, text[start]))
+ start = self.variable(start+1, text)
+ if text[start] != ':':
+ raise AssertionError
+ self.cur.append((start, Punctuation, text[start]))
+ return self.formula(start+1, text)
+ if text[start] == '<': # <...&...>
+ self.cur.append((start, Punctuation, text[start]))
+ start = self.formula(start+1, text)
+ if text[start] not in self.LOGIC:
+ raise AssertionError
+ self.cur.append((start, Operator, text[start]))
+ start = self.formula(start+1, text)
+ if text[start] != '>':
+ raise AssertionError
+ self.cur.append((start, Punctuation, text[start]))
+ return start+1
+ # ...=...
+ start = self.term(start, text)
+ if text[start] != '=':
+ raise AssertionError
+ self.cur.append((start, Operator, text[start]))
+ start = self.term(start+1, text)
+ return start
+
+ def rule(self, start, text):
+ """Tokenize a rule."""
+ match = self.RULES.match(text, start)
+ if match is None:
+ raise AssertionError
+ groups = sorted(match.regs[1:]) # exclude whole match
+ for group in groups:
+ if group[0] >= 0: # this group matched
+ self.cur.append((start, Keyword, text[start:group[0]]))
+ self.cur.append((group[0], Number.Integer,
+ text[group[0]:group[1]]))
+ if group[1] != match.end():
+ self.cur.append((group[1], Keyword,
+ text[group[1]:match.end()]))
+ break
+ else:
+ self.cur.append((start, Keyword, text[start:match.end()]))
+ return match.end()
+
+ def lineno(self, start, text):
+ """Tokenize a line referral."""
+ end = start
+ while text[end] not in self.NUMBERS:
+ end += 1
+ self.cur.append((start, Punctuation, text[start]))
+ self.cur.append((start+1, Text, text[start+1:end]))
+ start = end
+ match = self.LINENOS.match(text, start)
+ if match is None:
+ raise AssertionError
+ if text[match.end()] != ')':
+ raise AssertionError
+ self.cur.append((match.start(), Number.Integer, match.group(0)))
+ self.cur.append((match.end(), Punctuation, text[match.end()]))
+ return match.end() + 1
+
+ def error_till_line_end(self, start, text):
+ """Mark everything from ``start`` to the end of the line as Error."""
+ end = start
+ try:
+ while text[end] != '\n': # there's whitespace in rules
+ end += 1
+ except IndexError:
+ end = len(text)
+ if end != start:
+ self.cur.append((start, Error, text[start:end]))
+ end = self.whitespace(end, text)
+ return end
+
+ def get_tokens_unprocessed(self, text):
+ """Returns a list of TNT tokens."""
+ self.cur = []
+ start = end = self.whitespace(0, text)
+ while start <= end < len(text):
+ try:
+ # try line number
+ while text[end] in self.NUMBERS:
+ end += 1
+ if end != start: # actual number present
+ self.cur.append((start, Number.Integer, text[start:end]))
+ # whitespace is required after a line number
+ orig = len(self.cur)
+ try:
+ start = end = self.whitespace(end, text, True)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(end, text)
+ continue
+ # at this point it could be a comment
+ match = self.COMMENT.match(text, start)
+ if match is not None:
+ self.cur.append((start, Comment, text[start:match.end()]))
+ start = end = match.end()
+ # anything after the closing bracket is invalid
+ start = end = self.error_till_line_end(start, text)
+ # do not attempt to process the rest
+ continue
+ del match
+ if text[start] in '[]': # fantasy push or pop
+ self.cur.append((start, Keyword, text[start]))
+ start += 1
+ end += 1
+ else:
+ # one formula, possibly containing subformulae
+ orig = len(self.cur)
+ try:
+ start = end = self.formula(start, text)
+ except (AssertionError, RecursionError): # not well-formed
+ del self.cur[orig:]
+ while text[end] not in self.WHITESPACE:
+ end += 1
+ self.cur.append((start, Error, text[start:end]))
+ start = end
+ # skip whitespace after formula
+ orig = len(self.cur)
+ try:
+ start = end = self.whitespace(end, text, True)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(start, text)
+ continue
+ # rule proving this formula a theorem
+ orig = len(self.cur)
+ try:
+ start = end = self.rule(start, text)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(start, text)
+ continue
+ # skip whitespace after rule
+ start = end = self.whitespace(end, text)
+ # line marker
+ if text[start] == '(':
+ orig = len(self.cur)
+ try:
+ start = end = self.lineno(start, text)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(start, text)
+ continue
+ start = end = self.whitespace(start, text)
+ except IndexError:
+ try:
+ del self.cur[orig:]
+ except NameError:
+ pass # if orig was never defined, fine
+ self.error_till_line_end(start, text)
+ return self.cur
diff --git a/pygments/lexers/trafficscript.py b/pygments/lexers/trafficscript.py
new file mode 100644
index 0000000..b8ef824
--- /dev/null
+++ b/pygments/lexers/trafficscript.py
@@ -0,0 +1,51 @@
+"""
+ pygments.lexers.trafficscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for RiverBed's TrafficScript (RTS) language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
+
+__all__ = ['RtsLexer']
+
+
+class RtsLexer(RegexLexer):
+ """
+ For Riverbed Stingray Traffic Manager
+
+ .. versionadded:: 2.1
+ """
+ name = 'TrafficScript'
+ aliases = ['trafficscript', 'rts']
+ filenames = ['*.rts']
+
+ tokens = {
+ 'root' : [
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
+ (r'"', String, 'escapable-string'),
+ (r'(0x[0-9a-fA-F]+|\d+)', Number),
+ (r'\d+\.\d+', Number.Float),
+ (r'\$[a-zA-Z](\w|_)*', Name.Variable),
+ (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
+ (r'[a-zA-Z][\w.]*', Name.Function),
+ (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
+ (r'(>=|<=|==|!=|'
+ r'&&|\|\||'
+ r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
+ r'>>|<<|'
+ r'\+\+|--|=>)', Operator),
+ (r'[ \t\r]+', Text),
+ (r'#[^\n]*', Comment),
+ ],
+ 'escapable-string' : [
+ (r'\\[tsn]', String.Escape),
+ (r'[^"]', String),
+ (r'"', String, '#pop'),
+ ],
+
+ }
diff --git a/pygments/lexers/typoscript.py b/pygments/lexers/typoscript.py
new file mode 100644
index 0000000..737169a
--- /dev/null
+++ b/pygments/lexers/typoscript.py
@@ -0,0 +1,217 @@
+"""
+ pygments.lexers.typoscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for TypoScript
+
+ `TypoScriptLexer`
+ A TypoScript lexer.
+
+ `TypoScriptCssDataLexer`
+ Lexer that highlights markers, constants and registers within css.
+
+ `TypoScriptHtmlDataLexer`
+ Lexer that highlights markers, constants and registers within html tags.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using
+from pygments.token import Text, Comment, Name, String, Number, \
+ Operator, Punctuation
+
+__all__ = ['TypoScriptLexer', 'TypoScriptCssDataLexer', 'TypoScriptHtmlDataLexer']
+
+
+class TypoScriptCssDataLexer(RegexLexer):
+ """
+ Lexer that highlights markers, constants and registers within css blocks.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScriptCssData'
+ aliases = ['typoscriptcssdata']
+
+ tokens = {
+ 'root': [
+ # marker: ###MARK###
+ (r'(.*)(###\w+###)(.*)', bygroups(String, Name.Constant, String)),
+ # constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # constant: {register:somevalue}
+ (r'(.*)(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})(.*)',
+ bygroups(String, String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol, String)), # constant
+ # whitespace
+ (r'\s+', Text),
+ # comments
+ (r'/\*(?:(?!\*/).)*\*/', Comment),
+ (r'(?<!(#|\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\n#]+|//[^\n]*)',
+ Comment),
+ # other
+ (r'[<>,:=.*%+|]', String),
+ (r'[\w"\-!/&;(){}]+', String),
+ ]
+ }
+
+
+class TypoScriptHtmlDataLexer(RegexLexer):
+ """
+ Lexer that highlights markers, constants and registers within html tags.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScriptHtmlData'
+ aliases = ['typoscripthtmldata']
+
+ tokens = {
+ 'root': [
+ # INCLUDE_TYPOSCRIPT
+ (r'(INCLUDE_TYPOSCRIPT)', Name.Class),
+ # Language label or extension resource FILE:... or LLL:... or EXT:...
+ (r'(EXT|FILE|LLL):[^}\n"]*', String),
+ # marker: ###MARK###
+ (r'(.*)(###\w+###)(.*)', bygroups(String, Name.Constant, String)),
+ # constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # constant: {register:somevalue}
+ (r'(.*)(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})(.*)',
+ bygroups(String, String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol, String)), # constant
+ # whitespace
+ (r'\s+', Text),
+ # other
+ (r'[<>,:=.*%+|]', String),
+ (r'[\w"\-!/&;(){}#]+', String),
+ ]
+ }
+
+
+class TypoScriptLexer(RegexLexer):
+ """
+ Lexer for TypoScript code.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScript'
+ url = 'http://docs.typo3.org/typo3cms/TyposcriptReference/'
+ aliases = ['typoscript']
+ filenames = ['*.typoscript']
+ mimetypes = ['text/x-typoscript']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ include('comment'),
+ include('constant'),
+ include('html'),
+ include('label'),
+ include('whitespace'),
+ include('keywords'),
+ include('punctuation'),
+ include('operator'),
+ include('structure'),
+ include('literal'),
+ include('other'),
+ ],
+ 'keywords': [
+ # Conditions
+ (r'(?i)(\[)(browser|compatVersion|dayofmonth|dayofweek|dayofyear|'
+ r'device|ELSE|END|GLOBAL|globalString|globalVar|hostname|hour|IP|'
+ r'language|loginUser|loginuser|minute|month|page|PIDinRootline|'
+ r'PIDupinRootline|system|treeLevel|useragent|userFunc|usergroup|'
+ r'version)([^\]]*)(\])',
+ bygroups(String.Symbol, Name.Constant, Text, String.Symbol)),
+ # Functions
+ (r'(?=[\w\-])(HTMLparser|HTMLparser_tags|addParams|cache|encapsLines|'
+ r'filelink|if|imageLinkWrap|imgResource|makelinks|numRows|numberFormat|'
+ r'parseFunc|replacement|round|select|split|stdWrap|strPad|tableStyle|'
+ r'tags|textStyle|typolink)(?![\w\-])', Name.Function),
+ # Toplevel objects and _*
+ (r'(?:(=?\s*<?\s+|^\s*))(cObj|field|config|content|constants|FEData|'
+ r'file|frameset|includeLibs|lib|page|plugin|register|resources|sitemap|'
+ r'sitetitle|styles|temp|tt_[^:.\s]*|types|xmlnews|INCLUDE_TYPOSCRIPT|'
+ r'_CSS_DEFAULT_STYLE|_DEFAULT_PI_VARS|_LOCAL_LANG)(?![\w\-])',
+ bygroups(Operator, Name.Builtin)),
+ # Content objects
+ (r'(?=[\w\-])(CASE|CLEARGIF|COA|COA_INT|COBJ_ARRAY|COLUMNS|CONTENT|'
+ r'CTABLE|EDITPANEL|FILE|FILES|FLUIDTEMPLATE|FORM|HMENU|HRULER|HTML|'
+ r'IMAGE|IMGTEXT|IMG_RESOURCE|LOAD_REGISTER|MEDIA|MULTIMEDIA|OTABLE|'
+ r'PAGE|QTOBJECT|RECORDS|RESTORE_REGISTER|SEARCHRESULT|SVG|SWFOBJECT|'
+ r'TEMPLATE|TEXT|USER|USER_INT)(?![\w\-])', Name.Class),
+ # Menu states
+ (r'(?=[\w\-])(ACTIFSUBRO|ACTIFSUB|ACTRO|ACT|CURIFSUBRO|CURIFSUB|CURRO|'
+ r'CUR|IFSUBRO|IFSUB|NO|SPC|USERDEF1RO|USERDEF1|USERDEF2RO|USERDEF2|'
+ r'USRRO|USR)', Name.Class),
+ # Menu objects
+ (r'(?=[\w\-])(GMENU_FOLDOUT|GMENU_LAYERS|GMENU|IMGMENUITEM|IMGMENU|'
+ r'JSMENUITEM|JSMENU|TMENUITEM|TMENU_LAYERS|TMENU)', Name.Class),
+ # PHP objects
+ (r'(?=[\w\-])(PHP_SCRIPT(_EXT|_INT)?)', Name.Class),
+ (r'(?=[\w\-])(userFunc)(?![\w\-])', Name.Function),
+ ],
+ 'whitespace': [
+ (r'\s+', Text),
+ ],
+ 'html': [
+ (r'<\S[^\n>]*>', using(TypoScriptHtmlDataLexer)),
+ (r'&[^;\n]*;', String),
+ (r'(?s)(_CSS_DEFAULT_STYLE)(\s*)(\()(.*(?=\n\)))',
+ bygroups(Name.Class, Text, String.Symbol, using(TypoScriptCssDataLexer))),
+ ],
+ 'literal': [
+ (r'0x[0-9A-Fa-f]+t?', Number.Hex),
+ # (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?\s*(?:[^=])', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r'(###\w+###)', Name.Constant),
+ ],
+ 'label': [
+ # Language label or extension resource FILE:... or LLL:... or EXT:...
+ (r'(EXT|FILE|LLL):[^}\n"]*', String),
+ # Path to a resource
+ (r'(?![^\w\-])([\w\-]+(?:/[\w\-]+)+/?)(\S*\n)',
+ bygroups(String, String)),
+ ],
+ 'punctuation': [
+ (r'[,.]', Punctuation),
+ ],
+ 'operator': [
+ (r'[<>,:=.*%+|]', Operator),
+ ],
+ 'structure': [
+ # Brackets and braces
+ (r'[{}()\[\]\\]', String.Symbol),
+ ],
+ 'constant': [
+ # Constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # Constant: {register:somevalue}
+ (r'(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol)), # constant
+ # Hex color: #ff0077
+ (r'(#[a-fA-F0-9]{6}\b|#[a-fA-F0-9]{3}\b)', String.Char)
+ ],
+ 'comment': [
+ (r'(?<!(#|\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\n#]+|//[^\n]*)',
+ Comment),
+ (r'/\*(?:(?!\*/).)*\*/', Comment),
+ (r'(\s*#\s*\n)', Comment),
+ ],
+ 'other': [
+ (r'[\w"\-!/&;]+', Text),
+ ],
+ }
diff --git a/pygments/lexers/ul4.py b/pygments/lexers/ul4.py
new file mode 100644
index 0000000..1530a52
--- /dev/null
+++ b/pygments/lexers/ul4.py
@@ -0,0 +1,267 @@
+"""
+ pygments.lexers.ul4
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the UL4 templating language.
+
+ More information: https://python.livinglogic.de/UL4.html
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, words, include
+from pygments.token import Comment, Text, Keyword, String, Number, Literal, \
+ Name, Other, Operator
+from pygments.lexers.web import HtmlLexer, XmlLexer, CssLexer, JavascriptLexer
+from pygments.lexers.python import PythonLexer
+
+__all__ = ['UL4Lexer', 'HTMLUL4Lexer', 'XMLUL4Lexer', 'CSSUL4Lexer',
+ 'JavascriptUL4Lexer', 'PythonUL4Lexer']
+
+
+class UL4Lexer(RegexLexer):
+ """
+ Generic lexer for UL4.
+
+ .. versionadded:: 2.12
+ """
+
+ flags = re.MULTILINE | re.DOTALL
+
+ name = 'UL4'
+ aliases = ['ul4']
+ filenames = ['*.ul4']
+
+ tokens = {
+ "root": [
+ (
+ # Template header without name:
+ # ``<?ul4?>``
+ r"(<\?)(\s*)(ul4)(\s*)(\?>)",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+ Text.Whitespace, Comment.Preproc),
+ ),
+ (
+ # Template header with name (potentially followed by the signature):
+ # ``<?ul4 foo(bar=42)?>``
+ r"(<\?)(\s*)(ul4)(\s*)([a-zA-Z_][a-zA-Z_0-9]*)?",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+ Text.Whitespace, Name.Function),
+ "ul4", # Switch to "expression" mode
+ ),
+ (
+ # Comment:
+ # ``<?note foobar?>``
+ r"<\?\s*note\s.*?\?>",
+ Comment,
+ ),
+ (
+ # Template documentation:
+ # ``<?doc foobar?>``
+ r"<\?\s*doc\s.*?\?>",
+ String.Doc,
+ ),
+ (
+ # ``<?ignore?>`` tag for commenting out code:
+ # ``<?ignore?>...<?end ignore?>``
+ r"<\?\s*ignore\s*\?>",
+ Comment,
+ "ignore", # Switch to "ignore" mode
+ ),
+ (
+ # ``<?def?>`` tag for defining local templates
+ # ``<?def foo(bar=42)?>...<?end def?>``
+ r"(<\?)(\s*)(def)(\s*)([a-zA-Z_][a-zA-Z_0-9]*)?",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+ Text.Whitespace, Name.Function),
+ "ul4", # Switch to "expression" mode
+ ),
+ (
+ # The rest of the supported tags
+ r"(<\?)(\s*)(printx|print|for|if|elif|else|while|code|renderblocks?|render)\b",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword),
+ "ul4", # Switch to "expression" mode
+ ),
+ (
+ # ``<?end?>`` tag for ending ``<?def?>``, ``<?for?>``,
+ # ``<?if?>``, ``<?while?>``, ``<?renderblock?>`` and
+ # ``<?renderblocks?>`` blocks.
+ r"(<\?)(\s*)(end)\b",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword),
+ "end", # Switch to "end tag" mode
+ ),
+ (
+ # ``<?whitespace?>`` tag for configuring whitespace handlng
+ r"(<\?)(\s*)(whitespace)\b",
+ bygroups(Comment.Preproc, Text.Whitespace, Keyword),
+ "whitespace", # Switch to "whitespace" mode
+ ),
+ # Plain text
+ (r"[^<]+", Other),
+ (r"<", Other),
+ ],
+ # Ignore mode ignores everything upto the matching ``<?end ignore?>`` tag
+ "ignore": [
+ # Nested ``<?ignore?>`` tag
+ (r"<\?\s*ignore\s*\?>", Comment, "#push"),
+ # ``<?end ignore?>`` tag
+ (r"<\?\s*end\s+ignore\s*\?>", Comment, "#pop"),
+ # Everything else
+ (r"[^<]+", Comment),
+ (r".", Comment),
+ ],
+ # UL4 expressions
+ "ul4": [
+ # End the tag
+ (r"\?>", Comment.Preproc, "#pop"),
+ # Start triple quoted string constant
+ ("'''", String, "string13"),
+ ('"""', String, "string23"),
+ # Start single quoted string constant
+ ("'", String, "string1"),
+ ('"', String, "string2"),
+ # Floating point number
+ (r"\d+\.\d*([eE][+-]?\d+)?", Number.Float),
+ (r"\.\d+([eE][+-]?\d+)?", Number.Float),
+ (r"\d+[eE][+-]?\d+", Number.Float),
+ # Binary integer: ``0b101010``
+ (r"0[bB][01]+", Number.Bin),
+ # Octal integer: ``0o52``
+ (r"0[oO][0-7]+", Number.Oct),
+ # Hexadecimal integer: ``0x2a``
+ (r"0[xX][0-9a-fA-F]+", Number.Hex),
+ # Date or datetime: ``@(2000-02-29)``/``@(2000-02-29T12:34:56.987654)``
+ (r"@\(\d\d\d\d-\d\d-\d\d(T(\d\d:\d\d(:\d\d(\.\d{6})?)?)?)?\)", Literal.Date),
+ # Color: ``#fff``, ``#fff8f0`` etc.
+ (r"#[0-9a-fA-F]{8}", Literal.Color),
+ (r"#[0-9a-fA-F]{6}", Literal.Color),
+ (r"#[0-9a-fA-F]{3,4}", Literal.Color),
+ # Decimal integer: ``42``
+ (r"\d+", Number.Integer),
+ # Operators
+ (r"//|==|!=|>=|<=|<<|>>|\+=|-=|\*=|/=|//=|<<=|>>=|&=|\|=|^=|=|[\[\]{},:*/().~%&|<>^+-]", Operator),
+ # Keywords
+ (words(("for", "in", "if", "else", "not", "is", "and", "or"), suffix=r"\b"), Keyword),
+ # Builtin constants
+ (words(("None", "False", "True"), suffix=r"\b"), Keyword.Constant),
+ # Variable names
+ (r"[a-zA-Z_][a-zA-Z0-9_]*", Name),
+ # Whitespace
+ (r"\s+", Text.Whitespace),
+ ],
+ # ``<?end ...?>`` tag for closing the last open block
+ "end": [
+ (r"\?>", Comment.Preproc, "#pop"),
+ (words(("for", "if", "def", "while", "renderblock", "renderblocks"), suffix=r"\b"), Keyword),
+ (r"\s+", Text),
+ ],
+ # Content of the ``<?whitespace ...?>`` tag:
+ # ``keep``, ``strip`` or ``smart``
+ "whitespace": [
+ (r"\?>", Comment.Preproc, "#pop"),
+ (words(("keep", "strip", "smart"), suffix=r"\b"), Comment.Preproc),
+ (r"\s+", Text.Whitespace),
+ ],
+ # Inside a string constant
+ "stringescapes": [
+ (r"""\\[\\'"abtnfr]""", String.Escape),
+ (r"\\x[0-9a-fA-F]{2}", String.Escape),
+ (r"\\u[0-9a-fA-F]{4}", String.Escape),
+ (r"\\U[0-9a-fA-F]{8}", String.Escape),
+ ],
+ # Inside a triple quoted string started with ``'''``
+ "string13": [
+ (r"'''", String, "#pop"),
+ include("stringescapes"),
+ (r"[^\\']+", String),
+ (r'.', String),
+ ],
+ # Inside a triple quoted string started with ``"""``
+ "string23": [
+ (r'"""', String, "#pop"),
+ include("stringescapes"),
+ (r'[^\\"]+', String),
+ (r'.', String),
+ ],
+ # Inside a single quoted string started with ``'``
+ "string1": [
+ (r"'", String, "#pop"),
+ include("stringescapes"),
+ (r"[^\\']+", String),
+ (r'.', String),
+ ],
+ # Inside a single quoted string started with ``"``
+ "string2": [
+ (r'"', String, "#pop"),
+ include("stringescapes"),
+ (r'[^\\"]+', String),
+ (r'.', String),
+ ],
+ }
+
+class HTMLUL4Lexer(DelegatingLexer):
+ """
+ Lexer for UL4 embedded in HTML.
+ """
+
+ name = 'HTML+UL4'
+ aliases = ['html+ul4']
+ filenames = ['*.htmlul4']
+
+ def __init__(self, **options):
+ super().__init__(HtmlLexer, UL4Lexer, **options)
+
+
+class XMLUL4Lexer(DelegatingLexer):
+ """
+ Lexer for UL4 embedded in XML.
+ """
+
+ name = 'XML+UL4'
+ aliases = ['xml+ul4']
+ filenames = ['*.xmlul4']
+
+ def __init__(self, **options):
+ super().__init__(XmlLexer, UL4Lexer, **options)
+
+
+class CSSUL4Lexer(DelegatingLexer):
+ """
+ Lexer for UL4 embedded in CSS.
+ """
+
+ name = 'CSS+UL4'
+ aliases = ['css+ul4']
+ filenames = ['*.cssul4']
+
+ def __init__(self, **options):
+ super().__init__(CssLexer, UL4Lexer, **options)
+
+
+class JavascriptUL4Lexer(DelegatingLexer):
+ """
+ Lexer for UL4 embedded in Javascript.
+ """
+
+ name = 'Javascript+UL4'
+ aliases = ['js+ul4']
+ filenames = ['*.jsul4']
+
+ def __init__(self, **options):
+ super().__init__(JavascriptLexer, UL4Lexer, **options)
+
+
+class PythonUL4Lexer(DelegatingLexer):
+ """
+ Lexer for UL4 embedded in Python.
+ """
+
+ name = 'Python+UL4'
+ aliases = ['py+ul4']
+ filenames = ['*.pyul4']
+
+ def __init__(self, **options):
+ super().__init__(PythonLexer, UL4Lexer, **options)
diff --git a/pygments/lexers/unicon.py b/pygments/lexers/unicon.py
new file mode 100644
index 0000000..3d7311a
--- /dev/null
+++ b/pygments/lexers/unicon.py
@@ -0,0 +1,411 @@
+"""
+ pygments.lexers.unicon
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Icon and Unicon languages, including ucode VM.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words, using, this
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['IconLexer', 'UcodeLexer', 'UniconLexer']
+
+
class UniconLexer(RegexLexer):
    """
    For Unicon source code.

    Covers comments, preprocessor directives, ``&``-prefixed keyword
    variables, reserved words, the built-in function set, numeric
    literals and operators.

    .. versionadded:: 2.4
    """

    name = 'Unicon'
    aliases = ['unicon']
    filenames = ['*.icn']
    mimetypes = ['text/unicon']

    flags = re.MULTILINE

    tokens = {
        'root': [
            # Intra-line whitespace; newlines are consumed at the end of
            # this state.  (A duplicated copy of this rule was removed:
            # it was unreachable because this rule already matched.)
            (r'[^\S\n]+', Text),
            # '#' comments run to the end of the line.
            (r'#.*?\n', Comment.Single),
            # Subprogram declarations: switch to a state that captures the
            # declared name and then the formal parameter list.
            (r'class|method|procedure', Keyword.Declaration, 'subprogram'),
            (r'(record)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            # Preprocessor directives.
            (r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|'
             r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc),
            (r'(&null|&fail)\b', Keyword.Constant),
            # Keyword variables.  Longer names come before their prefixes
            # (e.g. &errornumber before &error) so the alternation picks
            # the longest match.
            (r'&allocated|&ascii|&clock|&collections|&column|&col|&control|'
             r'&cset|&current|&dateline|&date|&digits|&dump|'
             r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|'
             r'&eventcode|&eventvalue|&eventsource|&e|'
             r'&features|&file|&host|&input|&interval|&lcase|&letters|'
             r'&level|&line|&ldrag|&lpress|&lrelease|'
             r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|'
             r'&phi|&pick|&pi|&pos|&progname|'
             r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|'
             r'&shift|&source|&storage|&subject|'
             r'&time|&trace|&ucase|&version|'
             r'&window|&x|&y', Keyword.Reserved),
            (r'(by|of|not|to)\b', Keyword.Reserved),
            (r'(global|local|static|abstract)\b', Keyword.Reserved),
            (r'package|link|import', Keyword.Declaration),
            # Reserved words.  NOTE(review): 'import' is also matched by
            # the rule above, so its entry here is never reached.
            (words((
                'break', 'case', 'create', 'critical', 'default', 'end', 'all',
                'do', 'else', 'every', 'fail', 'if', 'import', 'initial',
                'initially', 'invocable', 'next',
                'repeat', 'return', 'suspend',
                'then', 'thread', 'until', 'while'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Built-in functions and graphics/system procedures.
            (words((
                'Abort', 'abs', 'acos', 'Active', 'Alert', 'any', 'Any', 'Arb',
                'Arbno', 'args', 'array', 'asin', 'atan', 'atanh', 'Attrib',
                'Bal', 'bal', 'Bg', 'Break', 'Breakx',
                'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot',
                'classname', 'Clip', 'Clone', 'close', 'cofail', 'collect',
                'Color', 'ColorValue', 'condvar', 'constructor', 'copy',
                'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime',
                'dbcolumns', 'dbdriver', 'dbkeys', 'dblimits', 'dbproduct',
                'dbtables', 'delay', 'delete', 'detab', 'display', 'DrawArc',
                'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder',
                'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon',
                'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString',
                'DrawTorus', 'dtor',
                'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask',
                'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye',
                'Fail', 'fcntl', 'fdup', 'Fence', 'fetch', 'Fg', 'fieldnames',
                'filepair', 'FillArc', 'FillCircle', 'FillPolygon',
                'FillRectangle', 'find', 'flock', 'flush', 'Font', 'fork',
                'FreeColor', 'FreeSpace', 'function',
                'get', 'getch', 'getche', 'getegid', 'getenv', 'geteuid',
                'getgid', 'getgr', 'gethost', 'getpgrp', 'getpid', 'getppid',
                'getpw', 'getrusage', 'getserv', 'GetSpace', 'gettimeofday',
                'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink',
                'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert',
                'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor',
                'kbhit', 'key', 'keyword', 'kill',
                'left', 'Len', 'list', 'load', 'loadfunc', 'localnames',
                'lock', 'log', 'Lower', 'lstat',
                'many', 'map', 'match', 'MatrixMode', 'max', 'member',
                'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move',
                'MultMatrix', 'mutex',
                'name', 'NewColor', 'Normals', 'NotAny', 'numeric',
                'open', 'opencl', 'oprec', 'ord', 'OutPort',
                'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames',
                'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel',
                'PlayAudio', 'Poke', 'pop', 'PopMatrix', 'Pos', 'pos',
                'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale',
                'PushTranslate', 'put',
                'QueryPointer',
                'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready',
                'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename',
                'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos',
                'Rtab', 'rtod', 'runerr',
                'save', 'Scale', 'seek', 'select', 'send', 'seq',
                'serial', 'set', 'setenv', 'setgid', 'setgrent',
                'sethostent', 'setpgrp', 'setpwent', 'setservent',
                'setuid', 'signal', 'sin', 'sort', 'sortf', 'Span',
                'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop',
                'StopAudio', 'string', 'structure', 'Succeed', 'Swi',
                'symlink', 'sys_errstr', 'system', 'syswrite',
                'Tab', 'tab', 'table', 'tan',
                'Texcoord', 'Texture', 'TextWidth', 'Translate',
                'trap', 'trim', 'truncate', 'trylock', 'type',
                'umask', 'Uncouple', 'unlock', 'upto', 'utime',
                'variable', 'VAttrib',
                'wait', 'WAttrib', 'WDefault', 'WFlush', 'where',
                'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents',
                'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog',
                'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog',
                'write', 'WriteImage', 'writes', 'WSection',
                'WSync'), prefix=r'\b', suffix=r'\b'),
             Name.Function),
            include('numbers'),
            # Multi-character operators; placed before the single-character
            # operator rule so the longest form wins.
            (r'<@|<<@|>@|>>@|\.>|->|===|~===|\*\*|\+\+|--|\.|~==|~=|<=|>=|==|'
             r'=|<<=|<<|>>=|>>|:=:|:=|->|<->|\+:=|\|', Operator),
            (r'"(?:[^\\"]|\\.)*"', String),
            (r"'(?:[^\\']|\\.)*'", String.Character),
            (r'[*<>+=/&!?@~\\-]', Operator),
            (r'\^', Operator),
            # Bare identifier followed by whitespace or '(' / ',' -- the
            # trailing part is re-lexed with this lexer.
            (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"[\[\]]", Punctuation),
            (r"<>|=>|[()|:;,.'`{}%&?]", Punctuation),
            (r'\n+', Text),
        ],
        'numbers': [
            # Radix literals such as ``16rFF`` (bases 2-36).
            (r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex),
            (r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float),
            # Integers with an optional scale suffix (K, M, G, T, P).
            (r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer),
        ],
        # After ``class``/``method``/``procedure``: the declared name, then
        # '(' switches to the formal parameter list.
        'subprogram': [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'"[^"]+"|\w+', Name.Function),
            include('root'),
        ],
        # After a ``record`` declaration: wait for the field list.
        'type_def': [
            (r'\(', Punctuation, 'formal_part'),
        ],
        # Parameter/field list up to the closing ')'.
        'formal_part': [
            (r'\)', Punctuation, '#pop'),
            (r'\w+', Name.Variable),
            (r',', Punctuation),
            (r'(:string|:integer|:real)\b', Keyword.Reserved),
            include('root'),
        ],
    }
+
+
class IconLexer(RegexLexer):
    """
    Lexer for Icon.

    Covers comments, preprocessor directives, ``&``-prefixed keyword
    variables, reserved words, the built-in function set, numeric
    literals and operators.

    .. versionadded:: 1.6
    """
    name = 'Icon'
    aliases = ['icon']
    filenames = ['*.icon', '*.ICON']
    mimetypes = []
    flags = re.MULTILINE

    tokens = {
        'root': [
            # Intra-line whitespace; newlines are consumed at the end of
            # this state.  (A duplicated copy of this rule was removed:
            # it was unreachable because this rule already matched.)
            (r'[^\S\n]+', Text),
            # '#' comments run to the end of the line.
            (r'#.*?\n', Comment.Single),
            # Subprogram declarations: switch to a state that captures the
            # declared name and then the formal parameter list.
            (r'class|method|procedure', Keyword.Declaration, 'subprogram'),
            (r'(record)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            # Preprocessor directives.
            (r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|'
             r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc),
            (r'(&null|&fail)\b', Keyword.Constant),
            # Keyword variables.  Longer names come before their prefixes
            # (e.g. &errornumber before &error) so the alternation picks
            # the longest match.
            (r'&allocated|&ascii|&clock|&collections|&column|&col|&control|'
             r'&cset|&current|&dateline|&date|&digits|&dump|'
             r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|'
             r'&eventcode|&eventvalue|&eventsource|&e|'
             r'&features|&file|&host|&input|&interval|&lcase|&letters|'
             r'&level|&line|&ldrag|&lpress|&lrelease|'
             r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|'
             r'&phi|&pick|&pi|&pos|&progname|'
             r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|'
             r'&shift|&source|&storage|&subject|'
             r'&time|&trace|&ucase|&version|'
             r'&window|&x|&y', Keyword.Reserved),
            (r'(by|of|not|to)\b', Keyword.Reserved),
            (r'(global|local|static)\b', Keyword.Reserved),
            (r'link', Keyword.Declaration),
            # Reserved words.
            (words((
                'break', 'case', 'create', 'default', 'end', 'all',
                'do', 'else', 'every', 'fail', 'if', 'initial',
                'invocable', 'next',
                'repeat', 'return', 'suspend',
                'then', 'until', 'while'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Built-in functions and graphics/system procedures.
            (words((
                'abs', 'acos', 'Active', 'Alert', 'any',
                'args', 'array', 'asin', 'atan', 'atanh', 'Attrib',
                'bal', 'Bg',
                'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot',
                'Clip', 'Clone', 'close', 'cofail', 'collect',
                'Color', 'ColorValue', 'condvar', 'copy',
                'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime',
                'delay', 'delete', 'detab', 'display', 'DrawArc',
                'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder',
                'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon',
                'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString',
                'DrawTorus', 'dtor',
                'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask',
                'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye',
                'fcntl', 'fdup', 'fetch', 'Fg', 'fieldnames',
                'FillArc', 'FillCircle', 'FillPolygon',
                'FillRectangle', 'find', 'flock', 'flush', 'Font',
                'FreeColor', 'FreeSpace', 'function',
                'get', 'getch', 'getche', 'getenv',
                'GetSpace', 'gettimeofday',
                'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink',
                'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert',
                'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor',
                'kbhit', 'key', 'keyword', 'kill',
                'left', 'Len', 'list', 'load', 'loadfunc', 'localnames',
                'lock', 'log', 'Lower', 'lstat',
                'many', 'map', 'match', 'MatrixMode', 'max', 'member',
                'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move',
                'MultMatrix', 'mutex',
                'name', 'NewColor', 'Normals', 'numeric',
                'open', 'opencl', 'oprec', 'ord', 'OutPort',
                'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames',
                'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel',
                'Poke', 'pop', 'PopMatrix', 'Pos', 'pos',
                'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale',
                'PushTranslate', 'put',
                'QueryPointer',
                'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready',
                'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename',
                'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos',
                'rtod', 'runerr',
                'save', 'Scale', 'seek', 'select', 'send', 'seq',
                'serial', 'set', 'setenv',
                'setuid', 'signal', 'sin', 'sort', 'sortf',
                'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop',
                'string', 'structure', 'Swi',
                'symlink', 'sys_errstr', 'system', 'syswrite',
                'tab', 'table', 'tan',
                'Texcoord', 'Texture', 'TextWidth', 'Translate',
                'trap', 'trim', 'truncate', 'trylock', 'type',
                'umask', 'Uncouple', 'unlock', 'upto', 'utime',
                'variable',
                'wait', 'WAttrib', 'WDefault', 'WFlush', 'where',
                'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents',
                'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog',
                'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog',
                'write', 'WriteImage', 'writes', 'WSection',
                'WSync'), prefix=r'\b', suffix=r'\b'),
             Name.Function),
            include('numbers'),
            # Multi-character operators; placed before the single-character
            # operator rule so the longest form wins.
            (r'===|~===|\*\*|\+\+|--|\.|==|~==|<=|>=|=|~=|<<=|<<|>>=|>>|'
             r':=:|:=|<->|<-|\+:=|\|\||\|', Operator),
            (r'"(?:[^\\"]|\\.)*"', String),
            (r"'(?:[^\\']|\\.)*'", String.Character),
            (r'[*<>+=/&!?@~\\-]', Operator),
            # Bare identifier followed by whitespace or '(' / ',' -- the
            # trailing part is re-lexed with this lexer.
            (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"[\[\]]", Punctuation),
            (r"<>|=>|[()|:;,.'`{}%\^&?]", Punctuation),
            (r'\n+', Text),
        ],
        'numbers': [
            # Radix literals such as ``16rFF`` (bases 2-36).
            (r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex),
            (r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float),
            # Integers with an optional scale suffix (K, M, G, T, P).
            (r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer),
        ],
        # After ``class``/``method``/``procedure``: the declared name, then
        # '(' switches to the formal parameter list.
        'subprogram': [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'"[^"]+"|\w+', Name.Function),
            include('root'),
        ],
        # After a ``record`` declaration: wait for the field list.
        'type_def': [
            (r'\(', Punctuation, 'formal_part'),
        ],
        # Parameter/field list up to the closing ')'.
        'formal_part': [
            (r'\)', Punctuation, '#pop'),
            (r'\w+', Name.Variable),
            (r',', Punctuation),
            (r'(:string|:integer|:real)\b', Keyword.Reserved),
            include('root'),
        ],
    }
+
+
class UcodeLexer(RegexLexer):
    """
    Lexer for Icon ucode files.

    Ucode is the virtual-machine assembly emitted by the Icon/Unicon
    translators; this lexer classifies its directives, bookkeeping
    instructions, VM opcodes and literals.

    .. versionadded:: 2.4
    """
    name = 'ucode'
    aliases = ['ucode']
    filenames = ['*.u', '*.u1', '*.u2']
    mimetypes = []
    flags = re.MULTILINE

    tokens = {
        'root': [
            # '#' comments run to end of line.
            (r'(#.*\n)', Comment),
            # Declaration-level directives.
            (words((
                'con', 'declend', 'end',
                'global',
                'impl', 'invocable',
                'lab', 'link', 'local',
                'record',
                'uid', 'unions',
                'version'),
                prefix=r'\b', suffix=r'\b'),
                Name.Function),
            # Source-position bookkeeping instructions, rendered as comments.
            (words((
                'colm', 'filen', 'line', 'synt'),
                prefix=r'\b', suffix=r'\b'),
                Comment),
            # VM opcodes.
            # NOTE(review): 'end' also appears in the directive list above,
            # which matches first, so its entry here is unreachable.
            (words((
                'asgn',
                'bang', 'bscan',
                'cat', 'ccase', 'chfail',
                'coact', 'cofail', 'compl',
                'coret', 'create', 'cset',
                'diff', 'div', 'dup',
                'efail', 'einit', 'end', 'eqv', 'eret',
                'error', 'escan', 'esusp',
                'field',
                'goto',
                'init', 'int', 'inter',
                'invoke',
                'keywd',
                'lconcat', 'lexeq', 'lexge',
                'lexgt', 'lexle', 'lexlt', 'lexne',
                'limit', 'llist', 'lsusp',
                'mark', 'mark0', 'minus', 'mod', 'mult',
                'neg', 'neqv', 'nonnull', 'noop', 'null',
                'number', 'numeq', 'numge', 'numgt',
                'numle', 'numlt', 'numne',
                'pfail', 'plus', 'pnull', 'pop', 'power',
                'pret', 'proc', 'psusp', 'push1', 'pushn1',
                'random', 'rasgn', 'rcv', 'rcvbk', 'real',
                'refresh', 'rswap',
                'sdup', 'sect', 'size', 'snd', 'sndbk',
                'str', 'subsc', 'swap',
                'tabmat', 'tally', 'toby', 'trace',
                'unmark',
                'value', 'var'), prefix=r'\b', suffix=r'\b'),
                Keyword.Declaration),
            # High-level control-structure markers found in ucode listings.
            (words((
                'any',
                'case',
                'endcase', 'endevery', 'endif',
                'endifelse', 'endrepeat', 'endsuspend',
                'enduntil', 'endwhile', 'every',
                'if', 'ifelse',
                'repeat',
                'suspend',
                'until',
                'while'),
                prefix=r'\b', suffix=r'\b'),
                Name.Constant),
            # Integers, possibly ending the line or followed by a period.
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
            (r"(<>|=>|[()|:;,.'`]|[{}]|[%^]|[&?])", Punctuation),
            (r'\s+\b', Text),
            # Anything else (labels, identifiers) falls through as plain text.
            (r'[\w-]+', Text),
        ],
    }

    # Pygments calls analyse_text as a plain function (no instance), hence
    # no ``self`` parameter by convention.
    def analyse_text(text):
        """endsuspend and endrepeat are unique to this language, and
        \\self, /self doesn't seem to get used anywhere else either."""
        result = 0

        if 'endsuspend' in text:
            result += 0.1

        if 'endrepeat' in text:
            result += 0.1

        if ':=' in text:
            result += 0.01

        if 'procedure' in text and 'end' in text:
            result += 0.01

        # This seems quite unique to unicon -- doesn't appear in any other
        # example source we have (A quick search reveals that \SELF appears in
        # Perl/Raku code)
        if r'\self' in text and r'/self' in text:
            result += 0.5

        return result
diff --git a/pygments/lexers/urbi.py b/pygments/lexers/urbi.py
new file mode 100644
index 0000000..f197d59
--- /dev/null
+++ b/pygments/lexers/urbi.py
@@ -0,0 +1,145 @@
+"""
+ pygments.lexers.urbi
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for UrbiScript language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['UrbiscriptLexer']
+
+
class UrbiscriptLexer(ExtendedRegexLexer):
    """
    For UrbiScript source code.

    Uses ``ExtendedRegexLexer`` because binary blobs embedded in string
    literals (``\\B(<size>)(<bytes>)``) need a callback that advances the
    lexer position by the declared byte count.

    .. versionadded:: 1.5
    """

    name = 'UrbiScript'
    aliases = ['urbiscript']
    filenames = ['*.u']
    mimetypes = ['application/x-urbiscript']

    flags = re.DOTALL

    # TODO
    # - handle Experimental and deprecated tags with specific tokens
    # - handle Angles and Durations with specific tokens

    def blob_callback(lexer, match, ctx):
        # Group 1: the string text before the blob marker;
        # group 2: the "\B(<size>)(" marker itself; group 3: the size digits.
        text_before_blob = match.group(1)
        blob_start = match.group(2)
        blob_size_str = match.group(3)
        blob_size = int(blob_size_str)
        yield match.start(), String, text_before_blob
        ctx.pos += len(text_before_blob)

        # if blob size doesn't match blob format (example : "\B(2)(aaa)")
        # yield blob as a string
        # NOTE(review): if the declared size points past the end of the
        # input this indexing raises IndexError -- confirm the caller
        # guarantees enough text.
        if ctx.text[match.end() + blob_size] != ")":
            result = "\\B(" + blob_size_str + ")("
            yield match.start(), String, result
            ctx.pos += len(result)
            return

        # if blob is well formatted, yield as Escape
        blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")"
        yield match.start(), String.Escape, blob_text
        ctx.pos = match.end() + blob_size + 1  # +1 is the ending ")"

    tokens = {
        'root': [
            (r'\s+', Text),
            # comments
            (r'//.*?\n', Comment),
            (r'/\*', Comment.Multiline, 'comment'),
            # Loop keywords immediately followed by a flow modifier.
            (r'(every|for|loop|while)(?:;|&|\||,)', Keyword),
            (words((
                'assert', 'at', 'break', 'case', 'catch', 'closure', 'compl',
                'continue', 'default', 'else', 'enum', 'every', 'external',
                'finally', 'for', 'freezeif', 'if', 'new', 'onleave', 'return',
                'stopif', 'switch', 'this', 'throw', 'timeout', 'try',
                'waituntil', 'whenever', 'while'), suffix=r'\b'),
             Keyword),
            # C++ keywords reserved (but unused) by UrbiScript.
            (words((
                'asm', 'auto', 'bool', 'char', 'const_cast', 'delete', 'double',
                'dynamic_cast', 'explicit', 'export', 'extern', 'float', 'friend',
                'goto', 'inline', 'int', 'long', 'mutable', 'namespace', 'register',
                'reinterpret_cast', 'short', 'signed', 'sizeof', 'static_cast',
                'struct', 'template', 'typedef', 'typeid', 'typename', 'union',
                'unsigned', 'using', 'virtual', 'volatile', 'wchar_t'), suffix=r'\b'),
             Keyword.Reserved),
            # deprecated keywords, use a meaningful token when available
            (r'(emit|foreach|internal|loopn|static)\b', Keyword),
            # ignored keywords, use a meaningful token when available
            (r'(private|protected|public)\b', Keyword),
            (r'(var|do|const|function|class)\b', Keyword.Declaration),
            (r'(true|false|nil|void)\b', Keyword.Constant),
            # Built-in object names.
            (words((
                'Barrier', 'Binary', 'Boolean', 'CallMessage', 'Channel', 'Code',
                'Comparable', 'Container', 'Control', 'Date', 'Dictionary', 'Directory',
                'Duration', 'Enumeration', 'Event', 'Exception', 'Executable', 'File',
                'Finalizable', 'Float', 'FormatInfo', 'Formatter', 'Global', 'Group',
                'Hash', 'InputStream', 'IoService', 'Job', 'Kernel', 'Lazy', 'List',
                'Loadable', 'Lobby', 'Location', 'Logger', 'Math', 'Mutex', 'nil',
                'Object', 'Orderable', 'OutputStream', 'Pair', 'Path', 'Pattern',
                'Position', 'Primitive', 'Process', 'Profile', 'PseudoLazy', 'PubSub',
                'RangeIterable', 'Regexp', 'Semaphore', 'Server', 'Singleton', 'Socket',
                'StackFrame', 'Stream', 'String', 'System', 'Tag', 'Timeout',
                'Traceable', 'TrajectoryGenerator', 'Triplet', 'Tuple', 'UObject',
                'UValue', 'UVar'), suffix=r'\b'),
             Name.Builtin),
            # NOTE(review): 'this' is already matched by the keyword list
            # above, so this rule is unreachable.
            (r'(?:this)\b', Name.Builtin.Pseudo),
            # don't match single | and &
            (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator),
            (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b',
             Operator.Word),
            (r'[{}\[\]()]+', Punctuation),
            (r'(?:;|\||,|&|\?|!)+', Punctuation),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            # Float, Integer, Angle and Duration
            (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?'
             r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float),
            # handle binary blob in strings
            (r'"', String.Double, "string.double"),
            (r"'", String.Single, "string.single"),
        ],
        'string.double': [
            # Text up to a blob marker is handled by the callback.
            (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback),
            (r'(\\\\|\\[^\\]|[^"\\])*?"', String.Double, '#pop'),
        ],
        'string.single': [
            (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback),
            (r"(\\\\|\\[^\\]|[^'\\])*?'", String.Single, '#pop'),
        ],
        # from http://pygments.org/docs/lexerdevelopment/#changing-states
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ]
    }

    # Pygments calls analyse_text as a plain function (no instance).
    def analyse_text(text):
        """This is fairly similar to C and others, but freezeif and
        waituntil are unique keywords."""
        result = 0

        if 'freezeif' in text:
            result += 0.05

        if 'waituntil' in text:
            result += 0.05

        return result
diff --git a/pygments/lexers/usd.py b/pygments/lexers/usd.py
new file mode 100644
index 0000000..8c8e5a6
--- /dev/null
+++ b/pygments/lexers/usd.py
@@ -0,0 +1,90 @@
+"""
+ pygments.lexers.usd
+ ~~~~~~~~~~~~~~~~~~~
+
+ The module that parses Pixar's Universal Scene Description file format.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.lexer import words as words_
+from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \
+ OPERATORS, SPECIAL_NAMES, TYPES
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ["UsdLexer"]
+
+
def _keywords(words, type_):
    # Build a one-rule list matching any of *words* as whole words (anchored
    # with \b on both sides) and tagging them with the token *type_*.
    # Returned as a list so callers can concatenate it into ``tokens``.
    return [(words_(words, prefix=r"\b", suffix=r"\b"), type_)]
+
+
# Regex fragment for a USD type name, optionally an array type (e.g. "float[]").
_TYPE = r"(\w+(?:\[\])?)"
# Attribute name, possibly namespaced with ':' (e.g. "xformOp:translate"),
# optionally followed by a literal ".timeSamples" suffix (two extra groups).
_BASE_ATTRIBUTE = r"(\w+(?:\:\w+)*)(?:(\.)(timeSamples))?"
# One or more spaces/tabs (no newlines).
_WHITESPACE = r"([ \t]+)"
+
+
class UsdLexer(RegexLexer):
    """
    A lexer that parses Pixar's Universal Scene Description file format.

    .. versionadded:: 2.6
    """

    name = "USD"
    url = 'https://graphics.pixar.com/usd/release/index.html'
    aliases = ["usd", "usda"]
    filenames = ["*.usd", "*.usda"]

    tokens = {
        "root": [
            # Attribute declarations: the four rules below cover the
            # optional "custom" and "uniform" qualifier combinations, from
            # most to least specific.  The ``{_WHITESPACE}`` named field and
            # the positional ``{}`` fields are mixed in one format string;
            # str.format auto-numbers the positional ones.
            (r"(custom){_WHITESPACE}(uniform)(\s+){}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace,
                      Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
            (r"(custom){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"(uniform){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"{}{_WHITESPACE}{}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
        ] +
        # Word lists from pygments.lexers._usd_builtins, folded into the
        # 'root' state in priority order.
        _keywords(KEYWORDS, Keyword.Tokens) +
        _keywords(SPECIAL_NAMES, Name.Builtins) +
        _keywords(COMMON_ATTRIBUTES, Name.Attribute) +
        [(r"\b\w+:[\w:]+\b", Name.Attribute)] +
        _keywords(OPERATORS, Operator) +  # more attributes
        # Array forms of the known types ("float[]") before the bare types.
        [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] +
        _keywords(TYPES, Keyword.Type) +
        [
            (r"[(){}\[\]]", Punctuation),
            ("#.*?$", Comment.Single),
            (",", Punctuation),
            (";", Punctuation),  # ";"s are allowed to combine separate metadata lines
            ("=", Operator),
            # Numbers (optional sign, optional fraction, optional exponent).
            (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number),
            # Triple- and single-quoted strings.
            (r"'''(?:.|\n)*?'''", String),
            (r'"""(?:.|\n)*?"""', String),
            (r"'.*?'", String),
            (r'".*?"', String),
            # Prim/property paths in angle brackets.
            (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace),
            # Asset references between '@' delimiters.
            (r"@.*?@", String.Interpol),
            (r'\(.*"[.\\n]*".*\)', String.Doc),
            # File magic, e.g. "#usda 1.0" on the very first line.
            (r"\A#usda .+$", Comment.Hashbang),
            (r"\s+", Whitespace),
            (r"\w+", Text),
            (r"[_:.]+", Punctuation),
        ],
    }
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
new file mode 100644
index 0000000..8d577c2
--- /dev/null
+++ b/pygments/lexers/varnish.py
@@ -0,0 +1,189 @@
+"""
+ pygments.lexers.varnish
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Varnish configuration
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, \
+ inherit, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal, Whitespace
+
+__all__ = ['VCLLexer', 'VCLSnippetLexer']
+
+
class VCLLexer(RegexLexer):
    """
    For Varnish Configuration Language (VCL).

    .. versionadded:: 2.2
    """
    name = 'VCL'
    aliases = ['vcl']
    filenames = ['*.vcl']
    mimetypes = ['text/x-vclsrc']

    # Pygments calls analyse_text as a plain function (no instance).
    def analyse_text(text):
        # If the very first line is 'vcl 4.0;' it's pretty much guaranteed
        # that this is VCL
        if text.startswith('vcl 4.0;'):
            return 1.0
        # Skip over comments and blank lines
        # This is accurate enough that returning 0.9 is reasonable.
        # Almost no VCL files start without some comments.
        elif '\nvcl 4.0;' in text[:1000]:
            return 0.9
        # NOTE(review): falls through and returns None when nothing matched;
        # presumably Pygments coerces that to 0 -- confirm.

    tokens = {
        # Inside a "probe ... { ... }" block: ".name = value;" attributes.
        'probe': [
            include('whitespace'),
            include('comments'),
            (r'(\.\w+)(\s*=\s*)([^;]*)(;)',
             bygroups(Name.Attribute, Operator, using(this), Punctuation)),
            (r'\}', Punctuation, '#pop'),
        ],
        # Inside an "acl ... { ... }" block.
        'acl': [
            include('whitespace'),
            include('comments'),
            (r'[!/]+', Operator),
            (r';', Punctuation),
            (r'\d+', Number),
            (r'\}', Punctuation, '#pop'),
        ],
        # Inside a "backend ... { ... }" block.
        'backend': [
            include('whitespace'),
            # ".probe = name;" references a named probe ...
            (r'(\.probe)(\s*=\s*)(\w+)(;)',
             bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
            # ... while ".probe = {" opens an inline probe block.
            (r'(\.probe)(\s*=\s*)(\{)',
             bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
            (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
             bygroups(Name.Attribute, Operator, using(this), Punctuation)),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'statements': [
            # Duration literals (seconds/days/weeks/hours/minutes/years, ms).
            (r'(\d\.)?\d+[sdwhmy]', Literal.Date),
            (r'(\d\.)?\d+ms', Literal.Date),
            # Built-in subroutine names.
            (r'(vcl_pass|vcl_hash|vcl_hit|vcl_init|vcl_backend_fetch|vcl_pipe|'
             r'vcl_backend_response|vcl_synth|vcl_deliver|vcl_backend_error|'
             r'vcl_fini|vcl_recv|vcl_purge|vcl_miss)\b', Name.Function),
            # Return actions.
            (r'(pipe|retry|hash|synth|deliver|purge|abandon|lookup|pass|fail|ok|'
             r'miss|fetch|restart)\b', Name.Constant),
            # HTTP header accesses such as req.http.Host.
            (r'(beresp|obj|resp|req|req_top|bereq)\.http\.[a-zA-Z_-]+\b', Name.Variable),
            # Known VCL variables.
            (words((
                'obj.status', 'req.hash_always_miss', 'beresp.backend', 'req.esi_level',
                'req.can_gzip', 'beresp.ttl', 'obj.uncacheable', 'req.ttl', 'obj.hits',
                'client.identity', 'req.hash_ignore_busy', 'obj.reason', 'req.xid',
                'req_top.proto', 'beresp.age', 'obj.proto', 'obj.age', 'local.ip',
                'beresp.uncacheable', 'req.method', 'beresp.backend.ip', 'now',
                'obj.grace', 'req.restarts', 'beresp.keep', 'req.proto', 'resp.proto',
                'bereq.xid', 'bereq.between_bytes_timeout', 'req.esi',
                'bereq.first_byte_timeout', 'bereq.method', 'bereq.connect_timeout',
                'beresp.do_gzip', 'resp.status', 'beresp.do_gunzip',
                'beresp.storage_hint', 'resp.is_streaming', 'beresp.do_stream',
                'req_top.method', 'bereq.backend', 'beresp.backend.name', 'beresp.status',
                'req.url', 'obj.keep', 'obj.ttl', 'beresp.reason', 'bereq.retries',
                'resp.reason', 'bereq.url', 'beresp.do_esi', 'beresp.proto', 'client.ip',
                'bereq.proto', 'server.hostname', 'remote.ip', 'req.backend_hint',
                'server.identity', 'req_top.url', 'beresp.grace', 'beresp.was_304',
                'server.ip', 'bereq.uncacheable'), suffix=r'\b'),
             Name.Variable),
            (r'[!%&+*\-,/<.}{>=|~]+', Operator),
            (r'[();]', Punctuation),

            (r'[,]+', Punctuation),
            (words(('hash_data', 'regsub', 'regsuball', 'if', 'else',
                    'elsif', 'elif', 'synth', 'synthetic', 'ban',
                    'return', 'set', 'unset', 'import', 'include', 'new',
                    'rollback', 'call'), suffix=r'\b'),
             Keyword),
            # Stevedore (storage backend) variables.
            (r'storage\.\w+\.\w+\b', Name.Variable),
            (words(('true', 'false')), Name.Builtin),
            (r'\d+\b', Number),
            # Block openers; each switches to its dedicated state.
            (r'(backend)(\s+\w+)(\s*\{)',
             bygroups(Keyword, Name.Variable.Global, Punctuation), 'backend'),
            (r'(probe\s)(\s*\w+\s)(\{)',
             bygroups(Keyword, Name.Variable.Global, Punctuation), 'probe'),
            (r'(acl\s)(\s*\w+\s)(\{)',
             bygroups(Keyword, Name.Variable.Global, Punctuation), 'acl'),
            # Version declaration.
            # NOTE(review): the '.' in "4.0" is unescaped, so e.g. "4x0"
            # would also match.
            (r'(vcl )(4.0)(;)$',
             bygroups(Keyword.Reserved, Name.Constant, Punctuation)),
            # Subroutine definitions.
            (r'(sub\s+)([a-zA-Z]\w*)(\s*\{)',
             bygroups(Keyword, Name.Function, Punctuation)),
            # VMOD function calls like "std.log(...)".
            (r'([a-zA-Z_]\w*)'
             r'(\.)'
             r'([a-zA-Z_]\w*)'
             r'(\s*\(.*\))',
             bygroups(Name.Function, Punctuation, Name.Function, using(this))),
            (r'[a-zA-Z_]\w*', Name),
        ],
        # C-style block comment body (nestable via #push).
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        # The three VCL comment syntaxes: '#', '/* */' and '//'.
        'comments': [
            (r'#.*$', Comment),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'//.*$', Comment),
        ],
        # Ordinary double-quoted string (no newlines allowed).
        'string': [
            (r'"', String, '#pop'),
            (r'[^"\n]+', String),  # all other characters
        ],
        # Long string delimited by {" ... "} (may contain newlines).
        'multistring': [
            (r'[^"}]', String),
            (r'"\}', String, '#pop'),
            (r'["}]', String),
        ],
        # Whitespace plus string openers (kept together so every state that
        # includes 'whitespace' also recognizes strings).
        'whitespace': [
            (r'L?"', String, 'string'),
            (r'\{"', String, 'multistring'),
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),  # line continuation
        ],
        'root': [
            include('whitespace'),
            include('comments'),
            include('statements'),
            (r'\s+', Whitespace),
        ],
    }
+
+
class VCLSnippetLexer(VCLLexer):
    """
    For Varnish Configuration Language snippets.

    Extends :class:`VCLLexer` with rules for snippet placeholders
    (``...``) and bare variable-prefix references, while disabling
    auto-detection.

    .. versionadded:: 2.2
    """
    name = 'VCLSnippets'
    aliases = ['vclsnippets', 'vclsnippet']
    mimetypes = ['text/x-vclsnippet']
    filenames = []

    def analyse_text(text):
        # override method inherited from VCLLexer
        # Snippets should never win auto-detection over full VCL files.
        return 0

    tokens = {
        # Rules tried before the inherited VCLLexer rules.
        'snippetspre': [
            # Ellipsis placeholder marking elided code.
            (r'\.\.\.+', Comment),
            # Bare variable prefixes, optionally with a ".*" wildcard.
            (r'(bereq|req|req_top|resp|beresp|obj|client|server|local|remote|'
             r'storage)($|\.\*)', Name.Variable),
        ],
        # Rules tried after the inherited rules.
        'snippetspost': [
            (r'(backend)\b', Keyword.Reserved),
        ],
        # 'inherit' splices in VCLLexer's 'root' rules at this position.
        'root': [
            include('snippetspre'),
            inherit,
            include('snippetspost'),
        ],
    }
diff --git a/pygments/lexers/verification.py b/pygments/lexers/verification.py
new file mode 100644
index 0000000..1721d77
--- /dev/null
+++ b/pygments/lexers/verification.py
@@ -0,0 +1,114 @@
+"""
+ pygments.lexers.verification
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Intermediate Verification Languages (IVLs).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Operator, Keyword, Name, Number, \
+ Punctuation, Text, Generic
+
+__all__ = ['BoogieLexer', 'SilverLexer']
+
+
class BoogieLexer(RegexLexer):
    """
    For Boogie source code.

    .. versionadded:: 2.1
    """
    name = 'Boogie'
    url = 'https://boogie-docs.readthedocs.io/en/latest/'
    aliases = ['boogie']
    filenames = ['*.bpl']

    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            # '///' or '//!' doc comments before plain '//' comments.
            (r'//[/!](.*?)\n', Comment.Doc),
            (r'//(.*?)\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),

            (words((
                'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
                'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
                'then', 'var', 'while'),
                suffix=r'\b'), Keyword),
            (words(('const',), suffix=r'\b'), Keyword.Reserved),

            (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
            include('numbers'),
            (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
            # Quantifier trigger annotations, e.g. "{ f(x) }".
            (r'\{.*?\}', Generic.Emph),  # triggers
            (r"([{}():;,.])", Punctuation),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        # Nestable block comment body.
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'numbers': [
            (r'[0-9]+', Number.Integer),
        ],
    }
+
+
class SilverLexer(RegexLexer):
    """
    For Silver source code.

    .. versionadded:: 2.2
    """
    name = 'Silver'
    aliases = ['silver']
    filenames = ['*.sil', '*.vpr']

    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            # '///' or '//!' doc comments before plain '//' comments.
            (r'//[/!](.*?)\n', Comment.Doc),
            (r'//(.*?)\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),

            (words((
                'result', 'true', 'false', 'null', 'method', 'function',
                'predicate', 'program', 'domain', 'axiom', 'var', 'returns',
                'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert',
                'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh',
                'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection',
                'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists',
                'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique',
                'apply', 'package', 'folding', 'label', 'forperm'),
                suffix=r'\b'), Keyword),
            # Specification clauses are highlighted as decorators.
            (words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator),
            (words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type),
            include('numbers'),
            (r'[!%&*+=|?:<>/\-\[\]]', Operator),
            # Quantifier trigger annotations, e.g. "{ f(x) }".
            (r'\{.*?\}', Generic.Emph),  # triggers
            (r'([{}():;,.])', Punctuation),
            # Identifier
            (r'[\w$]\w*', Name),
        ],
        # Nestable block comment body.
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'numbers': [
            (r'[0-9]+', Number.Integer),
        ],
    }
diff --git a/pygments/lexers/web.py b/pygments/lexers/web.py
new file mode 100644
index 0000000..4acd3dd
--- /dev/null
+++ b/pygments/lexers/web.py
@@ -0,0 +1,23 @@
+"""
+ pygments.lexers.web
+ ~~~~~~~~~~~~~~~~~~~
+
+ Just export previously exported lexers.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \
+ HamlLexer, ScamlLexer, JadeLexer
+from pygments.lexers.css import CssLexer, SassLexer, ScssLexer
+from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \
+ DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer
+from pygments.lexers.actionscript import ActionScriptLexer, \
+ ActionScript3Lexer, MxmlLexer
+from pygments.lexers.php import PhpLexer
+from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer
+from pygments.lexers.data import JsonLexer
+JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
+
+# compatibility shim only: nothing is exported on a star-import of this module
+__all__ = []
diff --git a/pygments/lexers/webassembly.py b/pygments/lexers/webassembly.py
new file mode 100644
index 0000000..e10884f
--- /dev/null
+++ b/pygments/lexers/webassembly.py
@@ -0,0 +1,120 @@
+"""
+ pygments.lexers.webassembly
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the WebAssembly text format.
+
+ The grammar can be found at https://github.com/WebAssembly/spec/blob/master/interpreter/README.md
+ and https://webassembly.github.io/spec/core/text/.
+
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, bygroups, default
+from pygments.token import Text, Comment, Operator, Keyword, String, Number, Punctuation, Name
+
+__all__ = ['WatLexer']
+
+# Structural keywords of the WebAssembly text format.
+keywords = (
+    'module', 'import', 'func', 'funcref', 'start', 'param', 'local', 'type',
+    'result', 'export', 'memory', 'global', 'mut', 'data', 'table', 'elem',
+    'if', 'then', 'else', 'end', 'block', 'loop'
+)
+
+# Instruction mnemonics; matching one of these sends the lexer below into its
+# 'arguments' state to pick up optional offset=/align= immediates.
+builtins = (
+    'unreachable', 'nop', 'block', 'loop', 'if', 'else', 'end', 'br', 'br_if',
+    'br_table', 'return', 'call', 'call_indirect', 'drop', 'select',
+    'local.get', 'local.set', 'local.tee', 'global.get', 'global.set',
+    'i32.load', 'i64.load', 'f32.load', 'f64.load', 'i32.load8_s',
+    'i32.load8_u', 'i32.load16_s', 'i32.load16_u', 'i64.load8_s',
+    'i64.load8_u', 'i64.load16_s', 'i64.load16_u', 'i64.load32_s',
+    'i64.load32_u', 'i32.store', 'i64.store', 'f32.store', 'f64.store',
+    'i32.store8', 'i32.store16', 'i64.store8', 'i64.store16', 'i64.store32',
+    'memory.size', 'memory.grow', 'i32.const', 'i64.const', 'f32.const',
+    'f64.const', 'i32.eqz', 'i32.eq', 'i32.ne', 'i32.lt_s', 'i32.lt_u',
+    'i32.gt_s', 'i32.gt_u', 'i32.le_s', 'i32.le_u', 'i32.ge_s', 'i32.ge_u',
+    'i64.eqz', 'i64.eq', 'i64.ne', 'i64.lt_s', 'i64.lt_u', 'i64.gt_s',
+    'i64.gt_u', 'i64.le_s', 'i64.le_u', 'i64.ge_s', 'i64.ge_u', 'f32.eq',
+    'f32.ne', 'f32.lt', 'f32.gt', 'f32.le', 'f32.ge', 'f64.eq', 'f64.ne',
+    'f64.lt', 'f64.gt', 'f64.le', 'f64.ge', 'i32.clz', 'i32.ctz', 'i32.popcnt',
+    'i32.add', 'i32.sub', 'i32.mul', 'i32.div_s', 'i32.div_u', 'i32.rem_s',
+    'i32.rem_u', 'i32.and', 'i32.or', 'i32.xor', 'i32.shl', 'i32.shr_s',
+    'i32.shr_u', 'i32.rotl', 'i32.rotr', 'i64.clz', 'i64.ctz', 'i64.popcnt',
+    'i64.add', 'i64.sub', 'i64.mul', 'i64.div_s', 'i64.div_u', 'i64.rem_s',
+    'i64.rem_u', 'i64.and', 'i64.or', 'i64.xor', 'i64.shl', 'i64.shr_s',
+    'i64.shr_u', 'i64.rotl', 'i64.rotr', 'f32.abs', 'f32.neg', 'f32.ceil',
+    'f32.floor', 'f32.trunc', 'f32.nearest', 'f32.sqrt', 'f32.add', 'f32.sub',
+    'f32.mul', 'f32.div', 'f32.min', 'f32.max', 'f32.copysign', 'f64.abs',
+    'f64.neg', 'f64.ceil', 'f64.floor', 'f64.trunc', 'f64.nearest', 'f64.sqrt',
+    'f64.add', 'f64.sub', 'f64.mul', 'f64.div', 'f64.min', 'f64.max',
+    'f64.copysign', 'i32.wrap_i64', 'i32.trunc_f32_s', 'i32.trunc_f32_u',
+    'i32.trunc_f64_s', 'i32.trunc_f64_u', 'i64.extend_i32_s',
+    'i64.extend_i32_u', 'i64.trunc_f32_s', 'i64.trunc_f32_u',
+    'i64.trunc_f64_s', 'i64.trunc_f64_u', 'f32.convert_i32_s',
+    'f32.convert_i32_u', 'f32.convert_i64_s', 'f32.convert_i64_u',
+    'f32.demote_f64', 'f64.convert_i32_s', 'f64.convert_i32_u',
+    'f64.convert_i64_s', 'f64.convert_i64_u', 'f64.promote_f32',
+    'i32.reinterpret_f32', 'i64.reinterpret_f64', 'f32.reinterpret_i32',
+    'f64.reinterpret_i64',
+)
+
+
+class WatLexer(RegexLexer):
+    """Lexer for the WebAssembly text format.
+
+    .. versionadded:: 2.9
+    """
+
+    name = 'WebAssembly'
+    url = 'https://webassembly.org/'
+    aliases = ['wast', 'wat']
+    filenames = ['*.wat', '*.wast']
+
+    tokens = {
+        'root': [
+            # a keyword must not run directly into an identifier character
+            (words(keywords, suffix=r'(?=[^a-z_\.])'), Keyword),
+            (words(builtins), Name.Builtin, 'arguments'),
+            (words(['i32', 'i64', 'f32', 'f64']), Keyword.Type),
+            (r'\$[A-Za-z0-9!#$%&\'*+./:<=>?@\\^_`|~-]+', Name.Variable), # yes, all of these are valid in identifiers
+            (r';;.*?$', Comment.Single),
+            (r'\(;', Comment.Multiline, 'nesting_comment'),
+            # NOTE(review): in the four float rules below the '.' separating
+            # integer and fraction parts is unescaped, so it matches any
+            # character; it was presumably meant to be r'\.' — confirm
+            (r'[+-]?0x[\dA-Fa-f](_?[\dA-Fa-f])*(.([\dA-Fa-f](_?[\dA-Fa-f])*)?)?([pP][+-]?[\dA-Fa-f](_?[\dA-Fa-f])*)?', Number.Float),
+            (r'[+-]?\d.\d(_?\d)*[eE][+-]?\d(_?\d)*', Number.Float),
+            (r'[+-]?\d.\d(_?\d)*', Number.Float),
+            (r'[+-]?\d.[eE][+-]?\d(_?\d)*', Number.Float),
+            (r'[+-]?(inf|nan:0x[\dA-Fa-f](_?[\dA-Fa-f])*|nan)', Number.Float),
+            (r'[+-]?0x[\dA-Fa-f](_?[\dA-Fa-f])*', Number.Hex),
+            (r'[+-]?\d(_?\d)*', Number.Integer),
+            (r'[\(\)]', Punctuation),
+            (r'"', String.Double, 'string'),
+            (r'\s+', Text),
+        ],
+        'nesting_comment': [
+            # (; ... ;) block comments nest: push on '(;', pop on ';)'
+            (r'\(;', Comment.Multiline, '#push'),
+            (r';\)', Comment.Multiline, '#pop'),
+            (r'[^;(]+', Comment.Multiline),
+            (r'[;(]', Comment.Multiline),
+        ],
+        'string': [
+            (r'\\[\dA-Fa-f][\dA-Fa-f]', String.Escape), # must have exactly two hex digits
+            (r'\\t', String.Escape),
+            (r'\\n', String.Escape),
+            (r'\\r', String.Escape),
+            (r'\\"', String.Escape),
+            (r"\\'", String.Escape),
+            (r'\\u\{[\dA-Fa-f](_?[\dA-Fa-f])*\}', String.Escape),
+            (r'\\\\', String.Escape),
+            (r'"', String.Double, '#pop'),
+            (r'[^"\\]+', String.Double),
+        ],
+        'arguments': [
+            # optional offset=/align= immediates following an instruction;
+            # anything else returns to 'root' via the default below
+            (r'\s+', Text),
+            (r'(offset)(=)(0x[\dA-Fa-f](_?[\dA-Fa-f])*)', bygroups(Keyword, Operator, Number.Hex)),
+            (r'(offset)(=)(\d(_?\d)*)', bygroups(Keyword, Operator, Number.Integer)),
+            (r'(align)(=)(0x[\dA-Fa-f](_?[\dA-Fa-f])*)', bygroups(Keyword, Operator, Number.Hex)),
+            (r'(align)(=)(\d(_?\d)*)', bygroups(Keyword, Operator, Number.Integer)),
+            default('#pop'),
+        ]
+    }
diff --git a/pygments/lexers/webidl.py b/pygments/lexers/webidl.py
new file mode 100644
index 0000000..3a8460d
--- /dev/null
+++ b/pygments/lexers/webidl.py
@@ -0,0 +1,299 @@
+"""
+ pygments.lexers.webidl
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Web IDL, including some extensions.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, default, include, words
+from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
+ String, Text
+
+__all__ = ['WebIDLLexer']
+
+_builtin_types = (
+    # primitive types
+    'byte', 'octet', 'boolean',
+    r'(?:unsigned\s+)?(?:short|long(?:\s+long)?)',
+    r'(?:unrestricted\s+)?(?:float|double)',
+    # string types
+    'DOMString', 'ByteString', 'USVString',
+    # exception types
+    'Error', 'DOMException',
+    # typed array types
+    'Uint8Array', 'Uint16Array', 'Uint32Array', 'Uint8ClampedArray',
+    'Float32Array', 'Float64Array',
+    # buffer source types
+    'ArrayBuffer', 'DataView', 'Int8Array', 'Int16Array', 'Int32Array',
+    # other
+    'any', 'void', 'object', 'RegExp',
+)
+# regex fragments shared by the lexer states below
+_identifier = r'_?[A-Za-z][a-zA-Z0-9_-]*'
+_keyword_suffix = r'(?![\w-])'  # a keyword must not run into an identifier char
+_string = r'"[^"]*"'
+
+
+class WebIDLLexer(RegexLexer):
+    """
+    For Web IDL.
+
+    .. versionadded:: 2.6
+    """
+
+    name = 'Web IDL'
+    url = 'https://www.w3.org/wiki/Web_IDL'
+    aliases = ['webidl']
+    filenames = ['*.webidl']
+
+    tokens = {
+        # whitespace and comment trivia, included by most states below
+        'common': [
+            (r'\s+', Text),
+            (r'(?s)/\*.*?\*/', Comment.Multiline),
+            (r'//.*', Comment.Single),
+            (r'^#.*', Comment.Preproc),
+        ],
+        # top level: definitions (interface, enum, callback, dictionary, ...)
+        'root': [
+            include('common'),
+            (r'\[', Punctuation, 'extended_attributes'),
+            (r'partial' + _keyword_suffix, Keyword),
+            (r'typedef' + _keyword_suffix, Keyword, ('typedef', 'type')),
+            (r'interface' + _keyword_suffix, Keyword, 'interface_rest'),
+            (r'enum' + _keyword_suffix, Keyword, 'enum_rest'),
+            (r'callback' + _keyword_suffix, Keyword, 'callback_rest'),
+            (r'dictionary' + _keyword_suffix, Keyword, 'dictionary_rest'),
+            (r'namespace' + _keyword_suffix, Keyword, 'namespace_rest'),
+            (_identifier, Name.Class, 'implements_rest'),
+        ],
+        # [ExtendedAttribute, ...] lists preceding a definition or argument
+        'extended_attributes': [
+            include('common'),
+            (r',', Punctuation),
+            (_identifier, Name.Decorator),
+            (r'=', Punctuation, 'extended_attribute_rest'),
+            (r'\(', Punctuation, 'argument_list'),
+            (r'\]', Punctuation, '#pop'),
+        ],
+        'extended_attribute_rest': [
+            include('common'),
+            (_identifier, Name, 'extended_attribute_named_rest'),
+            (_string, String),
+            (r'\(', Punctuation, 'identifier_list'),
+            default('#pop'),
+        ],
+        'extended_attribute_named_rest': [
+            include('common'),
+            (r'\(', Punctuation, 'argument_list'),
+            default('#pop'),
+        ],
+        'argument_list': [
+            include('common'),
+            (r'\)', Punctuation, '#pop'),
+            default('argument'),
+        ],
+        'argument': [
+            include('common'),
+            (r'optional' + _keyword_suffix, Keyword),
+            (r'\[', Punctuation, 'extended_attributes'),
+            (r',', Punctuation, '#pop'),
+            (r'\)', Punctuation, '#pop:2'),
+            default(('argument_rest', 'type'))
+        ],
+        'argument_rest': [
+            include('common'),
+            (_identifier, Name.Variable),
+            (r'\.\.\.', Punctuation),
+            (r'=', Punctuation, 'default_value'),
+            default('#pop'),
+        ],
+        'identifier_list': [
+            include('common'),
+            (_identifier, Name.Class),
+            (r',', Punctuation),
+            (r'\)', Punctuation, '#pop'),
+        ],
+        # a type; afterwards 'type_null' consumes an optional trailing '?'
+        'type': [
+            include('common'),
+            (r'(?:' + r'|'.join(_builtin_types) + r')' + _keyword_suffix,
+             Keyword.Type, 'type_null'),
+            (words(('sequence', 'Promise', 'FrozenArray'),
+                   suffix=_keyword_suffix), Keyword.Type, 'type_identifier'),
+            (_identifier, Name.Class, 'type_identifier'),
+            (r'\(', Punctuation, 'union_type'),
+        ],
+        'union_type': [
+            include('common'),
+            (r'or' + _keyword_suffix, Keyword),
+            (r'\)', Punctuation, ('#pop', 'type_null')),
+            default('type'),
+        ],
+        'type_identifier': [
+            (r'<', Punctuation, 'type_list'),
+            default(('#pop', 'type_null'))
+        ],
+        'type_null': [
+            (r'\?', Punctuation),
+            # pops both this state and the 'type' state that led here
+            default('#pop:2'),
+        ],
+        'default_value': [
+            include('common'),
+            include('const_value'),
+            (_string, String, '#pop'),
+            (r'\[\s*\]', Punctuation, '#pop'),
+        ],
+        # literal constants: booleans, floats, integers in several bases
+        'const_value': [
+            include('common'),
+            (words(('true', 'false', '-Infinity', 'Infinity', 'NaN', 'null'),
+                   suffix=_keyword_suffix), Keyword.Constant, '#pop'),
+            (r'-?(?:(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:[Ee][+-]?[0-9]+)?' +
+             r'|[0-9]+[Ee][+-]?[0-9]+)', Number.Float, '#pop'),
+            (r'-?[1-9][0-9]*', Number.Integer, '#pop'),
+            (r'-?0[Xx][0-9A-Fa-f]+', Number.Hex, '#pop'),
+            (r'-?0[0-7]*', Number.Oct, '#pop'),
+        ],
+        'typedef': [
+            include('common'),
+            (_identifier, Name.Class),
+            (r';', Punctuation, '#pop'),
+        ],
+        'namespace_rest': [
+            include('common'),
+            (_identifier, Name.Namespace),
+            (r'\{', Punctuation, 'namespace_body'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'namespace_body': [
+            include('common'),
+            (r'\[', Punctuation, 'extended_attributes'),
+            (r'readonly' + _keyword_suffix, Keyword),
+            (r'attribute' + _keyword_suffix,
+             Keyword, ('attribute_rest', 'type')),
+            (r'const' + _keyword_suffix, Keyword, ('const_rest', 'type')),
+            (r'\}', Punctuation, '#pop'),
+            default(('operation_rest', 'type')),
+        ],
+        'interface_rest': [
+            include('common'),
+            (_identifier, Name.Class),
+            (r':', Punctuation),
+            (r'\{', Punctuation, 'interface_body'),
+            (r';', Punctuation, '#pop'),
+        ],
+        # interface members; falls back to 'namespace_body' for the shared ones
+        'interface_body': [
+            (words(('iterable', 'maplike', 'setlike'), suffix=_keyword_suffix),
+             Keyword, 'iterable_maplike_setlike_rest'),
+            (words(('setter', 'getter', 'creator', 'deleter', 'legacycaller',
+                    'inherit', 'static', 'stringifier', 'jsonifier'),
+                   suffix=_keyword_suffix), Keyword),
+            (r'serializer' + _keyword_suffix, Keyword, 'serializer_rest'),
+            (r';', Punctuation),
+            include('namespace_body'),
+        ],
+        'attribute_rest': [
+            include('common'),
+            (_identifier, Name.Variable),
+            (r';', Punctuation, '#pop'),
+        ],
+        'const_rest': [
+            include('common'),
+            (_identifier, Name.Constant),
+            (r'=', Punctuation, 'const_value'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'operation_rest': [
+            include('common'),
+            (r';', Punctuation, '#pop'),
+            default('operation'),
+        ],
+        'operation': [
+            include('common'),
+            (_identifier, Name.Function),
+            (r'\(', Punctuation, 'argument_list'),
+            (r';', Punctuation, '#pop:2'),
+        ],
+        'iterable_maplike_setlike_rest': [
+            include('common'),
+            (r'<', Punctuation, 'type_list'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'type_list': [
+            include('common'),
+            (r',', Punctuation),
+            (r'>', Punctuation, '#pop'),
+            default('type'),
+        ],
+        'serializer_rest': [
+            include('common'),
+            (r'=', Punctuation, 'serialization_pattern'),
+            (r';', Punctuation, '#pop'),
+            default('operation'),
+        ],
+        'serialization_pattern': [
+            include('common'),
+            (_identifier, Name.Variable, '#pop'),
+            (r'\{', Punctuation, 'serialization_pattern_map'),
+            (r'\[', Punctuation, 'serialization_pattern_list'),
+        ],
+        'serialization_pattern_map': [
+            include('common'),
+            (words(('getter', 'inherit', 'attribute'),
+                   suffix=_keyword_suffix), Keyword),
+            (r',', Punctuation),
+            (_identifier, Name.Variable),
+            (r'\}', Punctuation, '#pop:2'),
+        ],
+        'serialization_pattern_list': [
+            include('common'),
+            (words(('getter', 'attribute'), suffix=_keyword_suffix), Keyword),
+            (r',', Punctuation),
+            (_identifier, Name.Variable),
+            (r']', Punctuation, '#pop:2'),
+        ],
+        'enum_rest': [
+            include('common'),
+            (_identifier, Name.Class),
+            (r'\{', Punctuation, 'enum_body'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'enum_body': [
+            include('common'),
+            (_string, String),
+            (r',', Punctuation),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'callback_rest': [
+            include('common'),
+            (r'interface' + _keyword_suffix,
+             Keyword, ('#pop', 'interface_rest')),
+            (_identifier, Name.Class),
+            (r'=', Punctuation, ('operation', 'type')),
+            (r';', Punctuation, '#pop'),
+        ],
+        'dictionary_rest': [
+            include('common'),
+            (_identifier, Name.Class),
+            (r':', Punctuation),
+            (r'\{', Punctuation, 'dictionary_body'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'dictionary_body': [
+            include('common'),
+            (r'\[', Punctuation, 'extended_attributes'),
+            (r'required' + _keyword_suffix, Keyword),
+            (r'\}', Punctuation, '#pop'),
+            default(('dictionary_item', 'type')),
+        ],
+        'dictionary_item': [
+            include('common'),
+            (_identifier, Name.Variable),
+            (r'=', Punctuation, 'default_value'),
+            (r';', Punctuation, '#pop'),
+        ],
+        'implements_rest': [
+            include('common'),
+            (r'implements' + _keyword_suffix, Keyword),
+            (_identifier, Name.Class),
+            (r';', Punctuation, '#pop'),
+        ],
+    }
diff --git a/pygments/lexers/webmisc.py b/pygments/lexers/webmisc.py
new file mode 100644
index 0000000..a048b50
--- /dev/null
+++ b/pygments/lexers/webmisc.py
@@ -0,0 +1,1010 @@
+"""
+ pygments.lexers.webmisc
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for misc. web stuff.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
+ default, using
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal, Whitespace
+
+from pygments.lexers.css import _indentation, _starts_block
+from pygments.lexers.html import HtmlLexer
+from pygments.lexers.javascript import JavascriptLexer
+from pygments.lexers.ruby import RubyLexer
+
+__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer']
+
+
+class DuelLexer(RegexLexer):
+    """
+    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
+
+    .. versionadded:: 1.4
+    """
+
+    name = 'Duel'
+    url = 'http://duelengine.org/'
+    aliases = ['duel', 'jbst', 'jsonml+bst']
+    filenames = ['*.duel', '*.jbst']
+    mimetypes = ['text/x-duel', 'text/x-jbst']
+
+    flags = re.DOTALL
+
+    tokens = {
+        'root': [
+            # <% ... %> blocks (also <%@ <%= <%# <%! <%:) contain JavaScript
+            (r'(<%[@=#!:]?)(.*?)(%>)',
+             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
+            # <%$ name : value %> expressions
+            (r'(<%\$)(.*?)(:)(.*?)(%>)',
+             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
+            # <%-- ... --%> server-side comments
+            (r'(<%--)(.*?)(--%>)',
+             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
+            # <script> bodies are JavaScript; the tags themselves are HTML
+            (r'(<script.*?>)(.*?)(</script>)',
+             bygroups(using(HtmlLexer),
+                      using(JavascriptLexer), using(HtmlLexer))),
+            # everything else is delegated to the HTML lexer
+            (r'(.+?)(?=<)', using(HtmlLexer)),
+            (r'.+', using(HtmlLexer)),
+        ],
+    }
+
+
+class XQueryLexer(ExtendedRegexLexer):
+ """
+ An XQuery lexer, parsing a stream and outputting the tokens needed to
+ highlight xquery code.
+
+ .. versionadded:: 1.4
+ """
+ name = 'XQuery'
+ url = 'https://www.w3.org/XML/Query/'
+ aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
+ filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
+ mimetypes = ['text/xquery', 'application/xquery']
+
+ xquery_parse_state = []
+
+ # FIX UNICODE LATER
+ # ncnamestartchar = (
+ # r"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
+ # r"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
+ # r"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
+ # r"[\u10000-\uEFFFF]"
+ # )
+ ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
+ # FIX UNICODE LATER
+ # ncnamechar = ncnamestartchar + (r"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
+ # r"[\u203F-\u2040]")
+ ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
+ ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
+ pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])"
+ pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
+ pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
+ prefixedname = "%s:%s" % (ncname, ncname)
+ unprefixedname = ncname
+ qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
+
+ entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
+ charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
+
+ stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
+ stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
+
+ # FIX UNICODE LATER
+ # elementcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
+ # r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
+ elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
+ # quotattrcontentchar = (r'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
+ # r'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
+ quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
+ # aposattrcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
+ # r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
+ aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]'
+
+ # CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
+ # aposattrcontentchar
+ # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
+
+ flags = re.DOTALL | re.MULTILINE
+
+ def punctuation_root_callback(lexer, match, ctx):
+ yield match.start(), Punctuation, match.group(1)
+ # transition to root always - don't pop off stack
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def operator_root_callback(lexer, match, ctx):
+ yield match.start(), Operator, match.group(1)
+ # transition to root always - don't pop off stack
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def popstate_tag_callback(lexer, match, ctx):
+ yield match.start(), Name.Tag, match.group(1)
+ if lexer.xquery_parse_state:
+ ctx.stack.append(lexer.xquery_parse_state.pop())
+ ctx.pos = match.end()
+
+ def popstate_xmlcomment_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append(lexer.xquery_parse_state.pop())
+ ctx.pos = match.end()
+
+ def popstate_kindtest_callback(lexer, match, ctx):
+ yield match.start(), Punctuation, match.group(1)
+ next_state = lexer.xquery_parse_state.pop()
+ if next_state == 'occurrenceindicator':
+ if re.match("[?*+]+", match.group(2)):
+ yield match.start(), Punctuation, match.group(2)
+ ctx.stack.append('operator')
+ ctx.pos = match.end()
+ else:
+ ctx.stack.append('operator')
+ ctx.pos = match.end(1)
+ else:
+ ctx.stack.append(next_state)
+ ctx.pos = match.end(1)
+
+ def popstate_callback(lexer, match, ctx):
+ yield match.start(), Punctuation, match.group(1)
+ # if we have run out of our state stack, pop whatever is on the pygments
+ # state stack
+ if len(lexer.xquery_parse_state) == 0:
+ ctx.stack.pop()
+ if not ctx.stack:
+ # make sure we have at least the root state on invalid inputs
+ ctx.stack = ['root']
+ elif len(ctx.stack) > 1:
+ ctx.stack.append(lexer.xquery_parse_state.pop())
+ else:
+ # i don't know if i'll need this, but in case, default back to root
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def pushstate_element_content_starttag_callback(lexer, match, ctx):
+ yield match.start(), Name.Tag, match.group(1)
+ lexer.xquery_parse_state.append('element_content')
+ ctx.stack.append('start_tag')
+ ctx.pos = match.end()
+
+ def pushstate_cdata_section_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('cdata_section')
+ lexer.xquery_parse_state.append(ctx.state.pop)
+ ctx.pos = match.end()
+
+ def pushstate_starttag_callback(lexer, match, ctx):
+ yield match.start(), Name.Tag, match.group(1)
+ lexer.xquery_parse_state.append(ctx.state.pop)
+ ctx.stack.append('start_tag')
+ ctx.pos = match.end()
+
+ def pushstate_operator_order_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ ctx.stack = ['root']
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_operator_map_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ ctx.stack = ['root']
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_operator_root_validate(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ ctx.stack = ['root']
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_operator_root_validate_withmode(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Keyword, match.group(3)
+ ctx.stack = ['root']
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('processing_instruction')
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('processing_instruction')
+ lexer.xquery_parse_state.append('element_content')
+ ctx.pos = match.end()
+
+ def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('cdata_section')
+ lexer.xquery_parse_state.append('element_content')
+ ctx.pos = match.end()
+
+ def pushstate_operator_cdata_section_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('cdata_section')
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('xml_comment')
+ lexer.xquery_parse_state.append('element_content')
+ ctx.pos = match.end()
+
+ def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
+ yield match.start(), String.Doc, match.group(1)
+ ctx.stack.append('xml_comment')
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
+ def pushstate_kindtest_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ lexer.xquery_parse_state.append('kindtest')
+ ctx.stack.append('kindtest')
+ ctx.pos = match.end()
+
+ def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ lexer.xquery_parse_state.append('operator')
+ ctx.stack.append('kindtestforpi')
+ ctx.pos = match.end()
+
+ def pushstate_operator_kindtest_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ lexer.xquery_parse_state.append('operator')
+ ctx.stack.append('kindtest')
+ ctx.pos = match.end()
+
+ def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
+ yield match.start(), Name.Tag, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ lexer.xquery_parse_state.append('occurrenceindicator')
+ ctx.stack.append('kindtest')
+ ctx.pos = match.end()
+
+ def pushstate_operator_starttag_callback(lexer, match, ctx):
+ yield match.start(), Name.Tag, match.group(1)
+ lexer.xquery_parse_state.append('operator')
+ ctx.stack.append('start_tag')
+ ctx.pos = match.end()
+
+ def pushstate_operator_root_callback(lexer, match, ctx):
+ yield match.start(), Punctuation, match.group(1)
+ lexer.xquery_parse_state.append('operator')
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def pushstate_operator_root_construct_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Whitespace, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ lexer.xquery_parse_state.append('operator')
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def pushstate_root_callback(lexer, match, ctx):
+ yield match.start(), Punctuation, match.group(1)
+ cur_state = ctx.stack.pop()
+ lexer.xquery_parse_state.append(cur_state)
+ ctx.stack = ['root']
+ ctx.pos = match.end()
+
+ def pushstate_operator_attribute_callback(lexer, match, ctx):
+ yield match.start(), Name.Attribute, match.group(1)
+ ctx.stack.append('operator')
+ ctx.pos = match.end()
+
+ tokens = {
+ 'comment': [
+ # xquery comments
+ (r'[^:()]+', Comment),
+ (r'\(:', Comment, '#push'),
+ (r':\)', Comment, '#pop'),
+ (r'[:()]', Comment),
+ ],
+ 'whitespace': [
+ (r'\s+', Whitespace),
+ ],
+ 'operator': [
+ include('whitespace'),
+ (r'(\})', popstate_callback),
+ (r'\(:', Comment, 'comment'),
+
+ (r'(\{)', pushstate_root_callback),
+ (r'then|else|external|at|div|except', Keyword, 'root'),
+ (r'order by', Keyword, 'root'),
+ (r'group by', Keyword, 'root'),
+ (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
+ (r'and|or', Operator.Word, 'root'),
+ (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
+ Operator.Word, 'root'),
+ (r'return|satisfies|to|union|where|count|preserve\s+strip',
+ Keyword, 'root'),
+ (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)',
+ operator_root_callback),
+ (r'(::|:|;|\[|//|/|,)',
+ punctuation_root_callback),
+ (r'(castable|cast)(\s+)(as)\b',
+ bygroups(Keyword, Whitespace, Keyword), 'singletype'),
+ (r'(instance)(\s+)(of)\b',
+ bygroups(Keyword, Whitespace, Keyword), 'itemtype'),
+ (r'(treat)(\s+)(as)\b',
+ bygroups(Keyword, Whitespace, Keyword), 'itemtype'),
+ (r'(case)(\s+)(' + stringdouble + ')',
+ bygroups(Keyword, Whitespace, String.Double), 'itemtype'),
+ (r'(case)(\s+)(' + stringsingle + ')',
+ bygroups(Keyword, Whitespace, String.Single), 'itemtype'),
+ (r'(case|as)\b', Keyword, 'itemtype'),
+ (r'(\))(\s*)(as)',
+ bygroups(Punctuation, Whitespace, Keyword), 'itemtype'),
+ (r'\$', Name.Variable, 'varname'),
+ (r'(for|let|previous|next)(\s+)(\$)',
+ bygroups(Keyword, Whitespace, Name.Variable), 'varname'),
+ (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
+ Whitespace, Name.Variable),
+ 'varname'),
+ # (r'\)|\?|\]', Punctuation, '#push'),
+ (r'\)|\?|\]', Punctuation),
+ (r'(empty)(\s+)(greatest|least)',
+ bygroups(Keyword, Whitespace, Keyword)),
+ (r'ascending|descending|default', Keyword, '#push'),
+ (r'(allowing)(\s+)(empty)',
+ bygroups(Keyword, Whitespace, Keyword)),
+ (r'external', Keyword),
+ (r'(start|when|end)', Keyword, 'root'),
+ (r'(only)(\s+)(end)', bygroups(Keyword, Whitespace, Keyword),
+ 'root'),
+ (r'collation', Keyword, 'uritooperator'),
+
+ # eXist specific XQUF
+ (r'(into|following|preceding|with)', Keyword, 'root'),
+
+ # support for current context on rhs of Simple Map Operator
+ (r'\.', Operator),
+
+ # finally catch all string literals and stay in operator state
+ (stringdouble, String.Double),
+ (stringsingle, String.Single),
+
+ (r'(catch)(\s*)', bygroups(Keyword, Whitespace), 'root'),
+ ],
+ 'uritooperator': [
+ (stringdouble, String.Double, '#pop'),
+ (stringsingle, String.Single, '#pop'),
+ ],
+ 'namespacedecl': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (r'(at)(\s+)('+stringdouble+')',
+ bygroups(Keyword, Whitespace, String.Double)),
+ (r"(at)(\s+)("+stringsingle+')',
+ bygroups(Keyword, Whitespace, String.Single)),
+ (stringdouble, String.Double),
+ (stringsingle, String.Single),
+ (r',', Punctuation),
+ (r'=', Operator),
+ (r';', Punctuation, 'root'),
+ (ncname, Name.Namespace),
+ ],
+ 'namespacekeyword': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (stringdouble, String.Double, 'namespacedecl'),
+ (stringsingle, String.Single, 'namespacedecl'),
+ (r'inherit|no-inherit', Keyword, 'root'),
+ (r'namespace', Keyword, 'namespacedecl'),
+ (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
+ (r'preserve|no-preserve', Keyword),
+ (r',', Punctuation),
+ ],
+ 'annotationname': [
+ (r'\(:', Comment, 'comment'),
+ (qname, Name.Decorator),
+ (r'(\()(' + stringdouble + ')', bygroups(Punctuation, String.Double)),
+ (r'(\()(' + stringsingle + ')', bygroups(Punctuation, String.Single)),
+ (r'(\,)(\s+)(' + stringdouble + ')',
+ bygroups(Punctuation, Text, String.Double)),
+ (r'(\,)(\s+)(' + stringsingle + ')',
+ bygroups(Punctuation, Text, String.Single)),
+ (r'\)', Punctuation),
+ (r'(\s+)(\%)', bygroups(Text, Name.Decorator), 'annotationname'),
+ (r'(\s+)(variable)(\s+)(\$)',
+ bygroups(Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
+ (r'(\s+)(function)(\s+)',
+ bygroups(Text, Keyword.Declaration, Text), 'root')
+ ],
+ 'varname': [
+ (r'\(:', Comment, 'comment'),
+ (r'(' + qname + r')(\()?', bygroups(Name, Punctuation), 'operator'),
+ ],
+ 'singletype': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (ncname + r'(:\*)', Name.Variable, 'operator'),
+ (qname, Name.Variable, 'operator'),
+ ],
+ 'itemtype': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (r'\$', Name.Variable, 'varname'),
+ (r'(void)(\s*)(\()(\s*)(\))',
+ bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
+ (r'(element|attribute|schema-element|schema-attribute|comment|text|'
+ r'node|binary|document-node|empty-sequence)(\s*)(\()',
+ pushstate_occurrenceindicator_kindtest_callback),
+ # Marklogic specific type?
+ (r'(processing-instruction)(\s*)(\()',
+ bygroups(Keyword, Text, Punctuation),
+ ('occurrenceindicator', 'kindtestforpi')),
+ (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
+ bygroups(Keyword, Text, Punctuation, Text, Punctuation),
+ 'occurrenceindicator'),
+ (r'(\(\#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
+ (r';', Punctuation, '#pop'),
+ (r'then|else', Keyword, '#pop'),
+ (r'(at)(\s+)(' + stringdouble + ')',
+ bygroups(Keyword, Text, String.Double), 'namespacedecl'),
+ (r'(at)(\s+)(' + stringsingle + ')',
+ bygroups(Keyword, Text, String.Single), 'namespacedecl'),
+ (r'except|intersect|in|is|return|satisfies|to|union|where|count',
+ Keyword, 'root'),
+ (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
+ (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
+ (r'external|at', Keyword, 'root'),
+ (r'(stable)(\s+)(order)(\s+)(by)',
+ bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
+ (r'(castable|cast)(\s+)(as)',
+ bygroups(Keyword, Text, Keyword), 'singletype'),
+ (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
+ (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
+ (r'(case)(\s+)(' + stringdouble + ')',
+ bygroups(Keyword, Text, String.Double), 'itemtype'),
+ (r'(case)(\s+)(' + stringsingle + ')',
+ bygroups(Keyword, Text, String.Single), 'itemtype'),
+ (r'case|as', Keyword, 'itemtype'),
+ (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
+ (ncname + r':\*', Keyword.Type, 'operator'),
+ (r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)),
+ (qname, Keyword.Type, 'occurrenceindicator'),
+ ],
+ 'kindtest': [
+ (r'\(:', Comment, 'comment'),
+ (r'\{', Punctuation, 'root'),
+ (r'(\))([*+?]?)', popstate_kindtest_callback),
+ (r'\*', Name, 'closekindtest'),
+ (qname, Name, 'closekindtest'),
+ (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
+ ],
+ 'kindtestforpi': [
+ (r'\(:', Comment, 'comment'),
+ (r'\)', Punctuation, '#pop'),
+ (ncname, Name.Variable),
+ (stringdouble, String.Double),
+ (stringsingle, String.Single),
+ ],
+ 'closekindtest': [
+ (r'\(:', Comment, 'comment'),
+ (r'(\))', popstate_callback),
+ (r',', Punctuation),
+ (r'(\{)', pushstate_operator_root_callback),
+ (r'\?', Punctuation),
+ ],
+ 'xml_comment': [
+ (r'(-->)', popstate_xmlcomment_callback),
+ (r'[^-]{1,2}', Literal),
+ (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
+ Literal),
+ ],
+ 'processing_instruction': [
+ (r'\s+', Text, 'processing_instruction_content'),
+ (r'\?>', String.Doc, '#pop'),
+ (pitarget, Name),
+ ],
+ 'processing_instruction_content': [
+ (r'\?>', String.Doc, '#pop'),
+ (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
+ Literal),
+ ],
+ 'cdata_section': [
+ (r']]>', String.Doc, '#pop'),
+ (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
+ Literal),
+ ],
+ 'start_tag': [
+ include('whitespace'),
+ (r'(/>)', popstate_tag_callback),
+ (r'>', Name.Tag, 'element_content'),
+ (r'"', Punctuation, 'quot_attribute_content'),
+ (r"'", Punctuation, 'apos_attribute_content'),
+ (r'=', Operator),
+ (qname, Name.Tag),
+ ],
+ 'quot_attribute_content': [
+ (r'"', Punctuation, 'start_tag'),
+ (r'(\{)', pushstate_root_callback),
+ (r'""', Name.Attribute),
+ (quotattrcontentchar, Name.Attribute),
+ (entityref, Name.Attribute),
+ (charref, Name.Attribute),
+ (r'\{\{|\}\}', Name.Attribute),
+ ],
+ 'apos_attribute_content': [
+ (r"'", Punctuation, 'start_tag'),
+ (r'\{', Punctuation, 'root'),
+ (r"''", Name.Attribute),
+ (aposattrcontentchar, Name.Attribute),
+ (entityref, Name.Attribute),
+ (charref, Name.Attribute),
+ (r'\{\{|\}\}', Name.Attribute),
+ ],
+ 'element_content': [
+ (r'</', Name.Tag, 'end_tag'),
+ (r'(\{)', pushstate_root_callback),
+ (r'(<!--)', pushstate_element_content_xmlcomment_callback),
+ (r'(<\?)', pushstate_element_content_processing_instruction_callback),
+ (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
+ (r'(<)', pushstate_element_content_starttag_callback),
+ (elementcontentchar, Literal),
+ (entityref, Literal),
+ (charref, Literal),
+ (r'\{\{|\}\}', Literal),
+ ],
+ 'end_tag': [
+ include('whitespace'),
+ (r'(>)', popstate_tag_callback),
+ (qname, Name.Tag),
+ ],
+ 'xmlspace_decl': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (r'preserve|strip', Keyword, '#pop'),
+ ],
+ 'declareordering': [
+ (r'\(:', Comment, 'comment'),
+ include('whitespace'),
+ (r'ordered|unordered', Keyword, '#pop'),
+ ],
+ 'xqueryversion': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (stringdouble, String.Double),
+ (stringsingle, String.Single),
+ (r'encoding', Keyword),
+ (r';', Punctuation, '#pop'),
+ ],
+ 'pragma': [
+ (qname, Name.Variable, 'pragmacontents'),
+ ],
+ 'pragmacontents': [
+ (r'#\)', Punctuation, 'operator'),
+ (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
+ Literal),
+ (r'(\s+)', Whitespace),
+ ],
+ 'occurrenceindicator': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+ (r'\*|\?|\+', Operator, 'operator'),
+ (r':=', Operator, 'root'),
+ default('operator'),
+ ],
+ 'option': [
+ include('whitespace'),
+ (qname, Name.Variable, '#pop'),
+ ],
+ 'qname_braren': [
+ include('whitespace'),
+ (r'(\{)', pushstate_operator_root_callback),
+ (r'(\()', Punctuation, 'root'),
+ ],
+ 'element_qname': [
+ (qname, Name.Variable, 'root'),
+ ],
+ 'attribute_qname': [
+ (qname, Name.Variable, 'root'),
+ ],
+ 'root': [
+ include('whitespace'),
+ (r'\(:', Comment, 'comment'),
+
+ # handle operator state
+ # order on numbers matters - handle most complex first
+ (r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'),
+ (r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'),
+ (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'),
+ (r'(\d+)', Number.Integer, 'operator'),
+ (r'(\.\.|\.|\))', Punctuation, 'operator'),
+ (r'(declare)(\s+)(construction)',
+ bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
+ (r'(declare)(\s+)(default)(\s+)(order)',
+ bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
+ (r'(declare)(\s+)(context)(\s+)(item)',
+ bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
+ (ncname + r':\*', Name, 'operator'),
+ (r'\*:'+ncname, Name.Tag, 'operator'),
+ (r'\*', Name.Tag, 'operator'),
+ (stringdouble, String.Double, 'operator'),
+ (stringsingle, String.Single, 'operator'),
+
+ (r'(\}|\])', popstate_callback),
+
+ # NAMESPACE DECL
+ (r'(declare)(\s+)(default)(\s+)(collation)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
+ Whitespace, Keyword.Declaration)),
+ (r'(module|declare)(\s+)(namespace)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
+ 'namespacedecl'),
+ (r'(declare)(\s+)(base-uri)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
+ 'namespacedecl'),
+
+ # NAMESPACE KEYWORD
+ (r'(declare)(\s+)(default)(\s+)(element|function)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
+ Whitespace, Keyword.Declaration),
+ 'namespacekeyword'),
+ (r'(import)(\s+)(schema|module)',
+ bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo),
+ 'namespacekeyword'),
+ (r'(declare)(\s+)(copy-namespaces)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
+ 'namespacekeyword'),
+
+ # VARNAMEs
+ (r'(for|let|some|every)(\s+)(\$)',
+ bygroups(Keyword, Whitespace, Name.Variable), 'varname'),
+ (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
+ bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
+ Whitespace, Name.Variable),
+ 'varname'),
+ (r'\$', Name.Variable, 'varname'),
+ (r'(declare)(\s+)(variable)(\s+)(\$)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
+ Whitespace, Name.Variable),
+ 'varname'),
+
+ # ANNOTATED GLOBAL VARIABLES AND FUNCTIONS
+ (r'(declare)(\s+)(\%)', bygroups(Keyword.Declaration, Whitespace,
+ Name.Decorator),
+ 'annotationname'),
+
+ # ITEMTYPE
+ (r'(\))(\s+)(as)', bygroups(Operator, Whitespace, Keyword),
+ 'itemtype'),
+
+ (r'(element|attribute|schema-element|schema-attribute|comment|'
+ r'text|node|document-node|empty-sequence)(\s+)(\()',
+ pushstate_operator_kindtest_callback),
+
+ (r'(processing-instruction)(\s+)(\()',
+ pushstate_operator_kindtestforpi_callback),
+
+ (r'(<!--)', pushstate_operator_xmlcomment_callback),
+
+ (r'(<\?)', pushstate_operator_processing_instruction_callback),
+
+ (r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
+
+ # (r'</', Name.Tag, 'end_tag'),
+ (r'(<)', pushstate_operator_starttag_callback),
+
+ (r'(declare)(\s+)(boundary-space)',
+ bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'xmlspace_decl'),
+
+ (r'(validate)(\s+)(lax|strict)',
+ pushstate_operator_root_validate_withmode),
+ (r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
+ (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Whitespace,
+ Punctuation)),
+ (r'(switch)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)),
+ (r'(element|attribute|namespace)(\s*)(\{)',
+ pushstate_operator_root_construct_callback),
+
+ (r'(document|text|processing-instruction|comment)(\s*)(\{)',
+ pushstate_operator_root_construct_callback),
+ # ATTRIBUTE
+ (r'(attribute)(\s+)(?=' + qname + r')',
+ bygroups(Keyword, Whitespace), 'attribute_qname'),
+ # ELEMENT
+ (r'(element)(\s+)(?=' + qname + r')',
+ bygroups(Keyword, Whitespace), 'element_qname'),
+ # PROCESSING_INSTRUCTION
+ (r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)',
+ bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
+ Punctuation),
+ 'operator'),
+
+ (r'(declare|define)(\s+)(function)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration)),
+
+ (r'(\{|\[)', pushstate_operator_root_callback),
+
+ (r'(unordered|ordered)(\s*)(\{)',
+ pushstate_operator_order_callback),
+
+ (r'(map|array)(\s*)(\{)',
+ pushstate_operator_map_callback),
+
+ (r'(declare)(\s+)(ordering)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
+ 'declareordering'),
+
+ (r'(xquery)(\s+)(version)',
+ bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo),
+ 'xqueryversion'),
+
+ (r'(\(#)(\s*)', bygroups(Punctuation, Whitespace), 'pragma'),
+
+ # sometimes return can occur in root state
+ (r'return', Keyword),
+
+ (r'(declare)(\s+)(option)', bygroups(Keyword.Declaration,
+ Whitespace,
+ Keyword.Declaration),
+ 'option'),
+
+ # URI LITERALS - single and double quoted
+ (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
+ (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
+
+ (r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
+ bygroups(Keyword, Punctuation)),
+ (r'(descendant|following-sibling|following|parent|preceding-sibling'
+ r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
+
+ (r'(if)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)),
+
+ (r'then|else', Keyword),
+
+ # eXist specific XQUF
+ (r'(update)(\s*)(insert|delete|replace|value|rename)',
+ bygroups(Keyword, Whitespace, Keyword)),
+ (r'(into|following|preceding|with)', Keyword),
+
+ # Marklogic specific
+ (r'(try)(\s*)', bygroups(Keyword, Whitespace), 'root'),
+ (r'(catch)(\s*)(\()(\$)',
+ bygroups(Keyword, Whitespace, Punctuation, Name.Variable),
+ 'varname'),
+
+
+ (r'(@'+qname+')', Name.Attribute, 'operator'),
+ (r'(@'+ncname+')', Name.Attribute, 'operator'),
+ (r'@\*:'+ncname, Name.Attribute, 'operator'),
+ (r'@\*', Name.Attribute, 'operator'),
+ (r'(@)', Name.Attribute, 'operator'),
+
+ (r'//|/|\+|-|;|,|\(|\)', Punctuation),
+
+ # STANDALONE QNAMES
+ (qname + r'(?=\s*\{)', Name.Tag, 'qname_braren'),
+ (qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
+ (r'(' + qname + ')(#)([0-9]+)', bygroups(Name.Function, Keyword.Type, Number.Integer)),
+ (qname, Name.Tag, 'operator'),
+ ]
+ }
+
+
class QmlLexer(RegexLexer):
    """
    For QML files.

    .. versionadded:: 1.6
    """

    # QML is based on javascript, so much of this is taken from the
    # JavascriptLexer above.

    name = 'QML'
    url = 'https://doc.qt.io/qt-6/qmlapplications.html'
    aliases = ['qml', 'qbs']
    filenames = ['*.qml', '*.qbs']
    mimetypes = ['application/x-qml', 'application/x-qt.qbs+qml']

    # pasted from JavascriptLexer, with some additions
    flags = re.DOTALL | re.MULTILINE

    tokens = {
        # Comments and whitespace, shared by 'root' and 'slashstartsregex'.
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered at positions where a '/' must begin a regex literal
        # rather than a division operator.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        # Recovery state: skip to end of line after a malformed regex.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # After any of these operators a '/' starts a regex literal.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            # QML insertions
            (r'\bid\s*:\s*[A-Za-z][\w.]*', Keyword.Declaration,
             'slashstartsregex'),
            (r'\b[A-Za-z][\w.]*\s*:', Keyword, 'slashstartsregex'),

            # the rest from JavascriptLexer
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]
    }
+
+
class CirruLexer(RegexLexer):
    r"""
    For Cirru source code.

    * using ``()`` for expressions, but restricted in a same line
    * using ``""`` for strings, with ``\`` for escaping chars
    * using ``$`` as folding operator
    * using ``,`` as unfolding operator
    * using indentations for nested blocks

    .. versionadded:: 2.0
    """

    name = 'Cirru'
    url = 'http://cirru.org/'
    aliases = ['cirru']
    filenames = ['*.cirru']
    mimetypes = ['text/x-cirru']
    flags = re.MULTILINE

    tokens = {
        # Inside a double-quoted string literal.
        'string': [
            (r'[^"\\\n]+', String),
            (r'\\', String.Escape, 'escape'),
            (r'"', String, '#pop'),
        ],
        # The single character following a backslash.
        'escape': [
            (r'.', String.Escape, '#pop'),
        ],
        # Head position of an expression: the next token is a function name.
        'function': [
            (r'\,', Operator, '#pop'),
            (r'[^\s"()]+', Name.Function, '#pop'),
            (r'\)', Operator, '#pop'),
            (r'(?=\n)', Text, '#pop'),
            (r'\(', Operator, '#push'),
            (r'"', String, ('#pop', 'string')),
            (r'[ ]+', Text.Whitespace),
        ],
        # Remainder of a line after the head token.
        'line': [
            (r'(?<!\w)\$(?!\w)', Operator, 'function'),
            (r'\(', Operator, 'function'),
            (r'\)', Operator),
            (r'\n', Text, '#pop'),
            (r'"', String, 'string'),
            (r'[ ]+', Text.Whitespace),
            (r'[+-]?[\d.]+\b', Number),
            (r'[^\s"()]+', Name.Variable)
        ],
        'root': [
            (r'^\n+', Text.Whitespace),
            # Each line starts in 'function' for its head token, then
            # falls back to 'line' for the rest of the line.
            default(('line', 'function')),
        ]
    }
+
+
class SlimLexer(ExtendedRegexLexer):
    """
    For Slim markup.

    .. versionadded:: 2.0
    """

    name = 'Slim'
    aliases = ['slim']
    filenames = ['*.slim']
    mimetypes = ['text/x-slim']

    flags = re.IGNORECASE
    # Matches any single character, additionally allowing a " |" line
    # continuation when the following line also ends in " |".
    _dot = r'(?: \|\n(?=.* \|)|.)'
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # _indentation is a callback defined elsewhere in this module;
            # presumably it drives the indentation-based block structure.
            (r'[ \t]*', _indentation),
        ],

        # Class (.foo) and id (#bar) shortcuts on a tag line.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            # '=' / '==' lines embed Ruby code.
            (r'([ \t]*==?)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'[ \t]+[\w:-]+(?==)', Name.Attribute, 'html-attributes'),
            default('plain'),
        ],

        'content': [
            include('css'),
            (r'[\w:-]+:[ \t]*\n', Text, 'plain'),
            # '-' lines are Ruby control code.
            (r'(-)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            # _starts_block is a callback factory defined elsewhere in
            # this module.
            (r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'),
            (r'[\w:-]+', Name.Tag, 'tag'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            (r'[ \t]+\n', Punctuation, '#pop:2'),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # Ruby string interpolation: #{...}
            (r'(#\{)(.*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'=', Punctuation),
            # Attribute values are highlighted as Ruby expressions.
            (r'"[^"]+"', using(RubyLexer), 'tag'),
            (r'\'[^\']+\'', using(RubyLexer), 'tag'),
            (r'\w+', Text, 'tag'),
        ],

        'slim-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
    }
diff --git a/pygments/lexers/whiley.py b/pygments/lexers/whiley.py
new file mode 100644
index 0000000..ad9f2dc
--- /dev/null
+++ b/pygments/lexers/whiley.py
@@ -0,0 +1,116 @@
+"""
+ pygments.lexers.whiley
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Whiley language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Text
+
+__all__ = ['WhileyLexer']
+
+
class WhileyLexer(RegexLexer):
    """
    Lexer for the Whiley programming language.

    .. versionadded:: 2.2
    """
    name = 'Whiley'
    url = 'http://whiley.org/'
    filenames = ['*.whiley']
    aliases = ['whiley']
    mimetypes = ['text/x-whiley']

    # See the language specification:
    # http://whiley.org/download/WhileyLanguageSpec.pdf

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # Comments
            (r'//.*', Comment.Single),
            # don't parse empty comment as doc comment
            (r'/\*\*/', Comment.Multiline),
            # /** ... */ is a doc comment; plain /* ... */ is not.
            (r'(?s)/\*\*.*?\*/', String.Doc),
            (r'(?s)/\*.*?\*/', Comment.Multiline),

            # Keywords
            (words((
                'if', 'else', 'while', 'for', 'do', 'return',
                'switch', 'case', 'default', 'break', 'continue',
                'requires', 'ensures', 'where', 'assert', 'assume',
                'all', 'no', 'some', 'in', 'is', 'new',
                'throw', 'try', 'catch', 'debug', 'skip', 'fail',
                'finite', 'total'), suffix=r'\b'), Keyword.Reserved),
            (words((
                'function', 'method', 'public', 'private', 'protected',
                'export', 'native'), suffix=r'\b'), Keyword.Declaration),
            # "constant" & "type" are not keywords unless used in declarations
            (r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b',
             bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(bool|byte|int|real|any|void)\b', Keyword.Type),
            # "from" is not a keyword unless used with import
            (r'(import)(\s+)(\*)([^\S\n]+)(from)\b',
             bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)),
            (r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b',
             bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)),
            (r'(package|import)\b', Keyword.Namespace),

            # standard library: https://github.com/Whiley/WhileyLibs/
            (words((
                # types defined in whiley.lang.Int
                'i8', 'i16', 'i32', 'i64',
                'u8', 'u16', 'u32', 'u64',
                'uint', 'nat',

                # whiley.lang.Any
                'toString'), suffix=r'\b'), Name.Builtin),

            # byte literal
            (r'[01]+b', Number.Bin),

            # decimal literal
            (r'[0-9]+\.[0-9]+', Number.Float),
            # match "1." but not ranges like "3..5"
            (r'[0-9]+\.(?!\.)', Number.Float),

            # integer literal
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            # character literal
            (r"""'[^\\]'""", String.Char),
            (r"""(')(\\['"\\btnfr])(')""",
             bygroups(String.Char, String.Escape, String.Char)),

            # string literal
            (r'"', String, 'string'),

            # operators and punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?'
             # unicode operators
             r'\u2200\u2203\u2205\u2282\u2286\u2283\u2287'
             r'\u222A\u2229\u2264\u2265\u2208\u2227\u2228'
             r']', Operator),

            # identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        # Inside a double-quoted string literal.
        'string': [
            (r'"', String, '#pop'),
            (r'\\[btnfr]', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\.', String),
            (r'[^\\"]+', String),
        ],
    }
diff --git a/pygments/lexers/wowtoc.py b/pygments/lexers/wowtoc.py
new file mode 100644
index 0000000..320ae01
--- /dev/null
+++ b/pygments/lexers/wowtoc.py
@@ -0,0 +1,120 @@
+"""
+ pygments.lexers.wowtoc
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for World of Warcraft TOC files
+
+ TOC files describe game addons.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Name, Text, Punctuation, String, Keyword
+
+__all__ = ["WoWTocLexer"]
+
+def _create_tag_line_pattern(inner_pattern, ignore_case=False):
+ return ((r"(?i)" if ignore_case else r"")
+ + r"^(##)( *)" # groups 1, 2
+ + inner_pattern # group 3
+ + r"( *)(:)( *)(.*?)( *)$") # groups 4, 5, 6, 7, 8
+
+
def _create_tag_line_token(inner_pattern, inner_token, ignore_case=False):
    """Return a ``(pattern, bygroups(...))`` rule for one kind of tag line.

    Every tag line shares the same shape; only the tag-name pattern and
    the token used to highlight the tag name differ between variants.
    """
    pattern = _create_tag_line_pattern(inner_pattern, ignore_case=ignore_case)
    group_tokens = bygroups(
        Keyword.Declaration,  # '##'
        Text.Whitespace,
        inner_token,          # the tag name itself
        Text.Whitespace,
        Punctuation,          # ':'
        Text.Whitespace,
        String,               # the tag value
        Text.Whitespace,
    )
    return (pattern, group_tokens)
+
+
class WoWTocLexer(RegexLexer):
    """
    Lexer for World of Warcraft TOC files.

    .. versionadded:: 2.14
    """

    name = "World of Warcraft TOC"
    aliases = ["wowtoc"]
    filenames = ["*.toc"]

    tokens = {
        "root": [
            # official localized tags, Notes and Title
            # (normal part is insensitive, locale part is sensitive)
            _create_tag_line_token(
                r"((?:[nN][oO][tT][eE][sS]|[tT][iI][tT][lL][eE])-(?:ptBR|zhCN|"
                r"enCN|frFR|deDE|itIT|esMX|ptPT|koKR|ruRU|esES|zhTW|enTW|enGB|enUS))",
                Name.Builtin,
            ),
            # other official tags
            _create_tag_line_token(
                r"(Interface|Title|Notes|RequiredDeps|Dep[^: ]*|OptionalDeps|"
                r"LoadOnDemand|LoadWith|LoadManagers|SavedVariablesPerCharacter|"
                r"SavedVariables|DefaultState|Secure|Author|Version)",
                Name.Builtin,
                ignore_case=True,
            ),
            # user-defined tags
            _create_tag_line_token(
                r"(X-[^: ]*)",
                Name.Variable,
                ignore_case=True,
            ),
            # non-conforming tags, but still valid
            _create_tag_line_token(
                r"([^: ]*)",
                Name.Other,
            ),

            # Comments
            (r"^#.*$", Comment),

            # Addon Files
            (r"^.+$", Name),
        ]
    }

    def analyse_text(text):
        """Score the likelihood that *text* is a WoW TOC file."""
        # at time of writing, this file suffix conflicts with one of Tex's in
        # markup.py. Tex's analyse_text() appears to be definitive (binary) and
        # does not share any likeness to WoW TOCs, which means we won't have to
        # compete with it by arbitrary increments in score.

        result = 0

        # while not required, an almost certain marker of WoW TOC's is the interface tag
        # if this tag is omitted, players will need to opt-in to loading the addon with
        # an options change ("Load out of date addons"). the value is also standardized:
        # `<major><minor><patch>`, with minor and patch being two-digit zero-padded.
        interface_pattern = _create_tag_line_pattern(r"(Interface)", ignore_case=True)
        match = re.search(interface_pattern, text)
        # group 7 of the tag-line pattern is the tag's value.
        if match and re.match(r"(\d+)(\d{2})(\d{2})", match.group(7)):
            result += 0.8

        casefolded = text.casefold()
        # Lua file listing is good marker too, but probably conflicts with many other
        # lexers
        if ".lua" in casefolded:
            result += 0.1
        # ditto for XML files, but they're less used in WoW TOCs
        if ".xml" in casefolded:
            result += 0.05

        return result
diff --git a/pygments/lexers/wren.py b/pygments/lexers/wren.py
new file mode 100644
index 0000000..f0f3793
--- /dev/null
+++ b/pygments/lexers/wren.py
@@ -0,0 +1,99 @@
+"""
+ pygments.lexers.wren
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Wren.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import include, RegexLexer, words
+from pygments.token import Whitespace, Punctuation, Keyword, Name, Comment, \
+ Operator, Number, String, Error
+
+__all__ = ['WrenLexer']
+
class WrenLexer(RegexLexer):
    """
    For Wren source code, version 0.4.0.

    .. versionadded:: 2.14.0
    """
    name = 'Wren'
    url = 'https://wren.io'
    aliases = ['wren']
    filenames = ['*.wren']

    # DOTALL lets '.' cross newlines inside raw (triple-quoted) strings.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # Whitespace.
            (r'\s+', Whitespace),
            (r'[,\\\[\]{}]', Punctuation),

            # Really 'root', not '#push': in 'interpolation',
            # parentheses inside the interpolation expression are
            # Punctuation, not String.Interpol.
            (r'\(', Punctuation, 'root'),
            (r'\)', Punctuation, '#pop'),

            # Keywords.  The (?<!\.) prefix keeps method calls like
            # x.class from being highlighted as keywords.
            (words((
                'as', 'break', 'class', 'construct', 'continue', 'else',
                'for', 'foreign', 'if', 'import', 'return', 'static', 'super',
                'this', 'var', 'while'), prefix = r'(?<!\.)',
                suffix = r'\b'), Keyword),

            (words((
                'true', 'false', 'null'), prefix = r'(?<!\.)',
                suffix = r'\b'), Keyword.Constant),

            (words((
                'in', 'is'), prefix = r'(?<!\.)',
                suffix = r'\b'), Operator.Word),

            # Comments.
            (r'/\*', Comment.Multiline, 'comment'), # Multiline, can nest.
            (r'//.*?$', Comment.Single), # Single line.
            (r'#.*?(\(.*?\))?$', Comment.Special), # Attribute or shebang.

            # Names and operators.
            (r'[!%&*+\-./:<=>?\\^|~]+', Operator),
            (r'[a-z][a-zA-Z_0-9]*', Name),
            (r'[A-Z][a-zA-Z_0-9]*', Name.Class),
            (r'__[a-zA-Z_0-9]*', Name.Variable.Class),
            (r'_[a-zA-Z_0-9]*', Name.Variable.Instance),

            # Numbers.
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+(\.\d+)?([eE][-+]?\d+)?', Number.Float),

            # Strings.
            (r'""".*?"""', String), # Raw string
            (r'"', String, 'string'), # Other string
        ],
        # Nestable block comments.
        'comment': [
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'([^*/]|\*(?!/)|/(?!\*))+', Comment.Multiline),
        ],
        # Inside a normal (interpolatable) string literal.
        'string': [
            (r'"', String, '#pop'),
            (r'\\[\\%"0abefnrtv]', String.Escape), # Escape.
            (r'\\x[a-fA-F0-9]{2}', String.Escape), # Byte escape.
            (r'\\u[a-fA-F0-9]{4}', String.Escape), # Unicode escape.
            (r'\\U[a-fA-F0-9]{8}', String.Escape), # Long Unicode escape.

            (r'%\(', String.Interpol, 'interpolation'),
            (r'[^\\"%]+', String), # All remaining characters.
        ],
        'interpolation': [
            # redefine closing paren to be String.Interpol
            (r'\)', String.Interpol, '#pop'),
            include('root'),
        ],
    }
diff --git a/pygments/lexers/x10.py b/pygments/lexers/x10.py
new file mode 100644
index 0000000..3e77c43
--- /dev/null
+++ b/pygments/lexers/x10.py
@@ -0,0 +1,67 @@
+"""
+ pygments.lexers.x10
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the X10 programming language.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Keyword, String
+
+__all__ = ['X10Lexer']
+
+
class X10Lexer(RegexLexer):
    """
    For the X10 language.

    .. versionadded:: 2.2
    """

    name = 'X10'
    url = 'http://x10-lang.org/'
    aliases = ['x10', 'xten']
    filenames = ['*.x10']
    mimetypes = ['text/x-x10']

    # Each word class below is joined with '|' into an alternation regex,
    # so every one must be a tuple of words — note the trailing comma on
    # the single-element tuple.
    keywords = (
        'as', 'assert', 'async', 'at', 'athome', 'ateach', 'atomic',
        'break', 'case', 'catch', 'class', 'clocked', 'continue',
        'def', 'default', 'do', 'else', 'final', 'finally', 'finish',
        'for', 'goto', 'haszero', 'here', 'if', 'import', 'in',
        'instanceof', 'interface', 'isref', 'new', 'offer',
        'operator', 'package', 'return', 'struct', 'switch', 'throw',
        'try', 'type', 'val', 'var', 'when', 'while'
    )

    # BUG FIX: this was ('void') — a plain string, not a tuple — so
    # '|'.join(types) produced 'v|o|i|d' and highlighted the individual
    # letters v, o, i, d as types instead of the word 'void'.
    types = (
        'void',
    )

    values = (
        'false', 'null', 'self', 'super', 'this', 'true'
    )

    modifiers = (
        'abstract', 'extends', 'implements', 'native', 'offers',
        'private', 'property', 'protected', 'public', 'static',
        'throws', 'transient'
    )

    tokens = {
        'root': [
            # Whitespace other than newlines (newlines fall through to
            # the catch-all rule at the end).
            (r'[^\S\n]+', Text),
            # Comments.
            (r'//.*?\n', Comment.Single),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            # Keyword classes.
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'\b(%s)\b' % '|'.join(types), Keyword.Type),
            (r'\b(%s)\b' % '|'.join(values), Keyword.Constant),
            (r'\b(%s)\b' % '|'.join(modifiers), Keyword.Declaration),
            # String and character literals.
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
            # Catch-all for everything else.
            (r'.', Text)
        ],
    }
diff --git a/pygments/lexers/xorg.py b/pygments/lexers/xorg.py
new file mode 100644
index 0000000..196ebcf
--- /dev/null
+++ b/pygments/lexers/xorg.py
@@ -0,0 +1,37 @@
+"""
+ pygments.lexers.xorg
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Xorg configs.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, String, Name, Text
+
+__all__ = ['XorgLexer']
+
+
class XorgLexer(RegexLexer):
    """Lexer for xorg.conf files."""
    name = 'Xorg'
    url = 'https://www.x.org/wiki/'
    aliases = ['xorg.conf']
    filenames = ['xorg.conf']
    mimetypes = []

    tokens = {
        'root': [
            (r'\s+', Text),
            # '#' starts a comment running to end of line.
            (r'#.*$', Comment),

            # Section/SubSection opener with its quoted name,
            # e.g.:  Section "InputDevice"
            (r'((?:Sub)?Section)(\s+)("\w+")',
             bygroups(String.Escape, Text, String.Escape)),
            (r'(End(?:Sub)?Section)', String.Escape),

            # Option lines: keyword followed by its value(s), up to a
            # newline or trailing comment.
            (r'(\w+)(\s+)([^\n#]+)',
             bygroups(Name.Builtin, Text, Name.Constant)),
        ],
    }
diff --git a/pygments/lexers/yang.py b/pygments/lexers/yang.py
new file mode 100644
index 0000000..1df675b
--- /dev/null
+++ b/pygments/lexers/yang.py
@@ -0,0 +1,104 @@
+"""
+ pygments.lexers.yang
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the YANG 1.1 modeling language. See :rfc:`7950`.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Token, Name, String, Comment, Number
+
+__all__ = ['YangLexer']
+
+
class YangLexer(RegexLexer):
    """
    Lexer for YANG, based on RFC7950.

    .. versionadded:: 2.7
    """
    name = 'YANG'
    url = 'https://tools.ietf.org/html/rfc7950/'
    aliases = ['yang']
    filenames = ['*.yang']
    mimetypes = ['application/yang']

    #Keywords from RFC7950 ; oriented at BNF style
    TOP_STMTS_KEYWORDS = ("module", "submodule")
    MODULE_HEADER_STMT_KEYWORDS = ("belongs-to", "namespace", "prefix", "yang-version")
    META_STMT_KEYWORDS = ("contact", "description", "organization",
                          "reference", "revision")
    LINKAGE_STMTS_KEYWORDS = ("import", "include", "revision-date")
    BODY_STMT_KEYWORDS = ("action", "argument", "augment", "deviation",
                          "extension", "feature", "grouping", "identity",
                          "if-feature", "input", "notification", "output",
                          "rpc", "typedef")
    DATA_DEF_STMT_KEYWORDS = ("anydata", "anyxml", "case", "choice",
                              "config", "container", "deviate", "leaf",
                              "leaf-list", "list", "must", "presence",
                              "refine", "uses", "when")
    TYPE_STMT_KEYWORDS = ("base", "bit", "default", "enum", "error-app-tag",
                          "error-message", "fraction-digits", "length",
                          "max-elements", "min-elements", "modifier",
                          "ordered-by", "path", "pattern", "position",
                          "range", "require-instance", "status", "type",
                          "units", "value", "yin-element")
    LIST_STMT_KEYWORDS = ("key", "mandatory", "unique")

    #RFC7950 other keywords
    CONSTANTS_KEYWORDS = ("add", "current", "delete", "deprecated", "false",
                          "invert-match", "max", "min", "not-supported",
                          "obsolete", "replace", "true", "unbounded", "user")

    #RFC7950 Built-In Types
    TYPES = ("binary", "bits", "boolean", "decimal64", "empty", "enumeration",
             "identityref", "instance-identifier", "int16", "int32", "int64",
             "int8", "leafref", "string", "uint16", "uint32", "uint64",
             "uint8", "union")

    # A keyword must be followed by a character that cannot appear in an
    # identifier (identifiers may contain word chars, '-' and ':').
    suffix_re_pattern = r'(?=[^\w\-:])'

    tokens = {
        # Nestable /* ... */ block comments.
        'comments': [
            (r'[^*/]', Comment),
            (r'/\*', Comment, '#push'),
            (r'\*/', Comment, '#pop'),
            (r'[*/]', Comment),
        ],
        "root": [
            (r'\s+', Text.Whitespace),
            (r'[{};]+', Token.Punctuation),
            (r'(?<![\-\w])(and|or|not|\+|\.)(?![\-\w])', Token.Operator),

            (r'"(?:\\"|[^"])*?"', String.Double),
            (r"'(?:\\'|[^'])*?'", String.Single),

            (r'/\*', Comment, 'comments'),
            (r'//.*?$', Comment),

            #match BNF stmt for `node-identifier` with [ prefix ":"]
            (r'(?:^|(?<=[\s{};]))([\w.-]+)(:)([\w.-]+)(?=[\s{};])',
             bygroups(Name.Namespace, Token.Punctuation, Name.Variable)),

            #match BNF stmt `date-arg-str`
            (r'([0-9]{4}\-[0-9]{2}\-[0-9]{2})(?=[\s{};])', Name.Label),
            (r'([0-9]+\.[0-9]+)(?=[\s{};])', Number.Float),
            (r'([0-9]+)(?=[\s{};])', Number.Integer),

            (words(TOP_STMTS_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(MODULE_HEADER_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(META_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(LINKAGE_STMTS_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(BODY_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(DATA_DEF_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(TYPE_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(LIST_STMT_KEYWORDS, suffix=suffix_re_pattern), Token.Keyword),
            (words(TYPES, suffix=suffix_re_pattern), Name.Class),
            (words(CONSTANTS_KEYWORDS, suffix=suffix_re_pattern), Name.Class),

            # catch-all for remaining bareword arguments
            (r'[^;{}\s\'"]+', Name.Variable),
        ]
    }
diff --git a/pygments/lexers/zig.py b/pygments/lexers/zig.py
new file mode 100644
index 0000000..be897a9
--- /dev/null
+++ b/pygments/lexers/zig.py
@@ -0,0 +1,124 @@
+"""
+ pygments.lexers.zig
+ ~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Zig.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+__all__ = ['ZigLexer']
+
+
class ZigLexer(RegexLexer):
    """
    Lexer for the Zig language.

    grammar: https://ziglang.org/documentation/master/#Grammar
    """
    name = 'Zig'
    url = 'http://www.ziglang.org'
    aliases = ['zig']
    filenames = ['*.zig']
    mimetypes = ['text/zig']

    # Primitive type names, matched as whole words.
    # Fix: the original was missing the comma after 'c_void', so Python
    # string-literal concatenation fused it with 'i8' into 'c_voidi8' and
    # neither 'c_void' nor 'i8' was ever highlighted as a type.
    type_keywords = (
        words(('bool', 'f16', 'f32', 'f64', 'f128', 'void', 'noreturn', 'type',
               'anyerror', 'promise', 'i0', 'u0', 'isize', 'usize', 'comptime_int',
               'comptime_float', 'c_short', 'c_ushort', 'c_int', 'c_uint', 'c_long',
               'c_ulong', 'c_longlong', 'c_ulonglong', 'c_longdouble', 'c_void',
               'i8', 'u8', 'i16', 'u16', 'i32', 'u32', 'i64', 'u64', 'i128',
               'u128'), suffix=r'\b'),
        Keyword.Type)

    # Storage/qualifier keywords.
    storage_keywords = (
        words(('const', 'var', 'extern', 'packed', 'export', 'pub', 'noalias',
               'inline', 'comptime', 'nakedcc', 'stdcallcc', 'volatile', 'allowzero',
               'align', 'linksection', 'threadlocal'), suffix=r'\b'),
        Keyword.Reserved)

    structure_keywords = (
        words(('struct', 'enum', 'union', 'error'), suffix=r'\b'),
        Keyword)

    statement_keywords = (
        words(('break', 'return', 'continue', 'asm', 'defer', 'errdefer',
               'unreachable', 'try', 'catch', 'async', 'await', 'suspend',
               'resume', 'cancel'), suffix=r'\b'),
        Keyword)

    conditional_keywords = (
        words(('if', 'else', 'switch', 'and', 'or', 'orelse'), suffix=r'\b'),
        Keyword)

    repeat_keywords = (
        words(('while', 'for'), suffix=r'\b'),
        Keyword)

    other_keywords = (
        words(('fn', 'usingnamespace', 'test'), suffix=r'\b'),
        Keyword)

    constant_keywords = (
        words(('true', 'false', 'null', 'undefined'), suffix=r'\b'),
        Keyword.Constant)

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),

            # Keywords (keyword rules must precede the identifier rule)
            statement_keywords,
            storage_keywords,
            structure_keywords,
            repeat_keywords,
            type_keywords,
            constant_keywords,
            conditional_keywords,
            other_keywords,

            # Floats (hex floats before plain hex integers)
            (r'0x[0-9a-fA-F]+\.[0-9a-fA-F]+([pP][\-+]?[0-9a-fA-F]+)?', Number.Float),
            (r'0x[0-9a-fA-F]+\.?[pP][\-+]?[0-9a-fA-F]+', Number.Float),
            (r'[0-9]+\.[0-9]+([eE][-+]?[0-9]+)?', Number.Float),
            (r'[0-9]+\.?[eE][-+]?[0-9]+', Number.Float),

            # Integers
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            # Identifiers (@... are compiler builtins)
            (r'@[a-zA-Z_]\w*', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),

            # Characters
            (r'\'\\\'\'', String.Escape),
            (r'\'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])\'',
             String.Escape),
            (r'\'[^\\\']\'', String),

            # Strings (\\... are Zig multiline string lines)
            (r'\\\\[^\n]*', String.Heredoc),
            (r'c\\\\[^\n]*', String.Heredoc),
            (r'c?"', String, 'string'),

            # Operators, Punctuation
            (r'[+%=><|^!?/\-*&~:]', Operator),
            (r'[{}()\[\],.;]', Punctuation)
        ],
        'string': [
            (r'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])',
             String.Escape),
            (r'[^\\"\n]+', String),
            (r'"', String, '#pop')
        ]
    }
diff --git a/pygments/modeline.py b/pygments/modeline.py
new file mode 100644
index 0000000..4363083
--- /dev/null
+++ b/pygments/modeline.py
@@ -0,0 +1,43 @@
+"""
+ pygments.modeline
+ ~~~~~~~~~~~~~~~~~
+
+ A simple modeline parser (based on pymodeline).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+__all__ = ['get_filetype_from_buffer']
+
+
modeline_re = re.compile(r'''
    (?: vi | vim | ex ) (?: [<=>]? \d* )? :
    .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)


def get_filetype_from_line(l):
    """Return the filetype named in a vi/vim/ex modeline, or None."""
    m = modeline_re.search(l)
    return m.group(1) if m else None


def get_filetype_from_buffer(buf, max_lines=5):
    """
    Scan the buffer for modelines and return filetype if one is found.
    """
    lines = buf.splitlines()
    # Modelines near the end of the buffer win: check the last
    # ``max_lines`` lines first, bottom-up.
    for line in reversed(lines[-max_lines:]):
        found = get_filetype_from_line(line)
        if found:
            return found
    # Then scan the head of the buffer, also bottom-up (same index
    # range as the original ``range(max_lines, -1, -1)`` loop).
    for index in range(min(max_lines, len(lines) - 1), -1, -1):
        found = get_filetype_from_line(lines[index])
        if found:
            return found

    return None
diff --git a/pygments/plugin.py b/pygments/plugin.py
new file mode 100644
index 0000000..0ffef47
--- /dev/null
+++ b/pygments/plugin.py
@@ -0,0 +1,88 @@
+"""
+ pygments.plugin
+ ~~~~~~~~~~~~~~~
+
+ Pygments plugin interface. By default, this tries to use
+ ``importlib.metadata``, which is in the Python standard
+ library since Python 3.8, or its ``importlib_metadata``
+ backport for earlier versions of Python. It falls back on
+ ``pkg_resources`` if not found. Finally, if ``pkg_resources``
+ is not found either, no plugins are loaded at all.
+
+ lexer plugins::
+
+ [pygments.lexers]
+ yourlexer = yourmodule:YourLexer
+
+ formatter plugins::
+
+ [pygments.formatters]
+ yourformatter = yourformatter:YourFormatter
+ /.ext = yourformatter:YourFormatter
+
+ As you can see, you can define extensions for the formatter
+ with a leading slash.
+
+ syntax plugins::
+
+ [pygments.styles]
+ yourstyle = yourstyle:YourStyle
+
+ filter plugin::
+
+ [pygments.filter]
+ yourfilter = yourfilter:YourFilter
+
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+LEXER_ENTRY_POINT = 'pygments.lexers'
+FORMATTER_ENTRY_POINT = 'pygments.formatters'
+STYLE_ENTRY_POINT = 'pygments.styles'
+FILTER_ENTRY_POINT = 'pygments.filters'
+
+
def iter_entry_points(group_name):
    """Return an iterable of entry points registered under *group_name*.

    Discovery is tried in order: ``importlib.metadata`` (stdlib since
    3.8), the ``importlib_metadata`` backport, then ``pkg_resources``.
    If none is importable, an empty list is returned and no plugins
    are loaded.
    """
    try:
        from importlib.metadata import entry_points
    except ImportError:
        try:
            from importlib_metadata import entry_points
        except ImportError:
            try:
                from pkg_resources import iter_entry_points as pkg_iter
            except (ImportError, OSError):
                # No plugin machinery available at all.
                return []
            return pkg_iter(group_name)

    groups = entry_points()
    if not hasattr(groups, 'select'):
        # Older dict-like interface, deprecated in Python 3.10 and recent
        # importlib_metadata, but needed on Python 3.8 and 3.9.
        return groups.get(group_name, [])
    # New interface in Python 3.10 and newer versions of the
    # importlib_metadata backport.
    return groups.select(group=group_name)
+
+
def find_plugin_lexers():
    """Yield the lexer classes registered by installed plugins."""
    for ep in iter_entry_points(LEXER_ENTRY_POINT):
        yield ep.load()
+
+
def find_plugin_formatters():
    """Yield ``(name, formatter_class)`` pairs for plugin formatters."""
    for ep in iter_entry_points(FORMATTER_ENTRY_POINT):
        yield ep.name, ep.load()
+
+
def find_plugin_styles():
    """Yield ``(name, style_class)`` pairs for plugin styles."""
    for ep in iter_entry_points(STYLE_ENTRY_POINT):
        yield ep.name, ep.load()
+
+
def find_plugin_filters():
    """Yield ``(name, filter_class)`` pairs for plugin filters."""
    for ep in iter_entry_points(FILTER_ENTRY_POINT):
        yield ep.name, ep.load()
diff --git a/pygments/regexopt.py b/pygments/regexopt.py
new file mode 100644
index 0000000..ae00791
--- /dev/null
+++ b/pygments/regexopt.py
@@ -0,0 +1,91 @@
+"""
+ pygments.regexopt
+ ~~~~~~~~~~~~~~~~~
+
+ An algorithm that generates optimized regexes for matching long lists of
+ literal strings.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from re import escape
+from os.path import commonprefix
+from itertools import groupby
+from operator import itemgetter
+
# Characters that must be backslash-escaped inside a regex character set.
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    """Build a regex character set ``[...]`` matching the given letters."""
    escaped = CS_ESCAPE.sub(lambda match: '\\' + match.group(), ''.join(letters))
    return '[%s]' % escaped
+
+
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings.

    ``open_paren`` is ``'('``, ``'(?:'`` or ``''``; the matching close
    paren (if any) is appended automatically.  ``strings`` must already
    be sorted so that ``commonprefix`` and the first-character grouping
    below behave deterministically.
    """
    close_paren = open_paren and ')' or ''
    if not strings:
        # nothing left to match
        return ''
    first = strings[0]
    if len(strings) == 1:
        # a single candidate: emit it verbatim (escaped)
        return open_paren + escape(first) + close_paren
    if not first:
        # the empty string is in the set (sorts first): make the
        # alternation of the remaining strings optional
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # longer strings first, then the one-char charset
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # only one-char strings left
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings: factor it out and recurse
        # on the stripped remainders (still sorted, since the shared
        # prefix does not change relative order)
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # factor out the common suffix; the stripped strings must be
        # re-sorted before recursing
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # last resort: split on whether each string starts with the same
    # character as the first one and recurse on each run
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
+
+
def regex_opt(strings, prefix='', suffix=''):
    """Return a regex **string** that matches any string in the given list.

    (Despite the historical wording, the result is a pattern string, not
    a compiled regex object.)

    The strings to match must be literal strings, not regexes. They will be
    regex-escaped.

    *prefix* and *suffix* are pre- and appended to the final regex.
    """
    strings = sorted(strings)
    return prefix + regex_opt_inner(strings, '(') + suffix
diff --git a/pygments/scanner.py b/pygments/scanner.py
new file mode 100644
index 0000000..d47ed48
--- /dev/null
+++ b/pygments/scanner.py
@@ -0,0 +1,104 @@
+"""
+ pygments.scanner
+ ~~~~~~~~~~~~~~~~
+
+ This library implements a regex based scanner. Some languages
+ like Pascal are easy to parse but have some keywords that
+ depend on the context. Because of this it's impossible to lex
+ that just by using a regular expression lexer like the
+ `RegexLexer`.
+
+ Have a look at the `DelphiLexer` to get an idea of how to use
+ this scanner.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+
+
class EndOfText(RuntimeError):
    """
    Raise if end of text is reached and the user
    tried to call a match function.
    """


class Scanner:
    """
    Simple regex-based scanner.

    All method patterns are regular expression strings (not
    compiled expressions!); they are compiled lazily and cached
    per instance.
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0
        self.pos = 0
        self.flags = flags
        self.last = None
        self.match = None
        self._re_cache = {}

    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        # Fix: the original used ``eos = property(eos, eos.__doc__)``,
        # which passed the docstring as the property's *setter* — any
        # assignment to ``eos`` would have tried to call a string.
        # ``@property`` keeps read access identical and drops the
        # broken setter.
        return self.pos >= self.data_length

    def _compile(self, pattern):
        # Compile `pattern` with the scanner's default flags, caching
        # the compiled object per instance.
        try:
            return self._re_cache[pattern]
        except KeyError:
            rx = self._re_cache[pattern] = re.compile(pattern, self.flags)
            return rx

    def check(self, pattern):
        """
        Apply `pattern` at the current position and return the match
        object (doesn't touch pos). Use this for lookahead.

        Raises `EndOfText` when called at the end of the text.
        """
        if self.eos:
            raise EndOfText()
        return self._compile(pattern).match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
        """
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.

        Raises `EndOfText` when called at the end of the text.
        """
        if self.eos:
            raise EndOfText()
        # NOTE(review): ``last`` is rotated *before* the match attempt,
        # so a failed scan still overwrites it with the previous match.
        # Preserved as-is since callers (e.g. the Delphi lexer) may
        # rely on it — confirm before changing.
        self.last = self.match
        m = self._compile(pattern).match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char (with default flags ``'.'`` does not
        match a newline, so this is a no-op at a line break)."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
diff --git a/pygments/sphinxext.py b/pygments/sphinxext.py
new file mode 100644
index 0000000..3ea2e36
--- /dev/null
+++ b/pygments/sphinxext.py
@@ -0,0 +1,217 @@
+"""
+ pygments.sphinxext
+ ~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension to generate automatic documentation of lexers,
+ formatters and filters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive
+from sphinx.util.nodes import nested_parse_with_titles
+
+
+MODULEDOC = '''
+.. module:: %s
+
+%s
+%s
+'''
+
+LEXERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+ :MIME types: %s
+
+ %s
+
+'''
+
+FMTERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+
+ %s
+
+'''
+
+FILTERDOC = '''
+.. class:: %s
+
+ :Name: %s
+
+ %s
+
+'''
+
+
class PygmentsDoc(Directive):
    """
    A directive to collect all lexers/formatters/filters and generate
    autoclass directives for them.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        """Dispatch on the single directive argument, generate rST and
        parse it into the document tree."""
        self.filenames = set()
        if self.arguments[0] == 'lexers':
            out = self.document_lexers()
        elif self.arguments[0] == 'formatters':
            out = self.document_formatters()
        elif self.arguments[0] == 'filters':
            out = self.document_filters()
        elif self.arguments[0] == 'lexers_overview':
            out = self.document_lexers_overview()
        else:
            raise Exception('invalid argument for "pygmentsdoc" directive')
        node = nodes.compound()
        vl = ViewList(out.split('\n'), source='')
        nested_parse_with_titles(self.state, vl, node)
        for fn in self.filenames:
            # rebuild the docs whenever a scanned source file changes
            self.state.document.settings.record_dependencies.add(fn)
        return node.children

    def document_lexers_overview(self):
        """Generate a tabular overview of all lexers.

        The columns are the lexer name, the extensions handled by this lexer
        (or "None"), the aliases and a link to the lexer class."""
        from pygments.lexers._mapping import LEXERS
        import pygments.lexers
        out = []

        table = []

        def format_link(name, url):
            # rST external-link markup when a URL is known
            if url:
                return f'`{name} <{url}>`_'
            return name

        for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
            lexer_cls = pygments.lexers.find_lexer_class(data[1])
            extensions = lexer_cls.filenames + lexer_cls.alias_filenames

            table.append({
                'name': format_link(data[1], lexer_cls.url),
                # Fix: escape "_" as "\_" for reStructuredText; the previous
                # bare-backslash replacement made rST swallow the underscore.
                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
                'aliases': ', '.join(data[2]),
                'class': f'{data[0]}.{classname}'
            })

        column_names = ['name', 'extensions', 'aliases', 'class']
        column_lengths = [max([len(row[column]) for row in table if row[column]])
                          for column in column_names]

        def write_row(*columns):
            """Format a table row"""
            out = []
            for l, c in zip(column_lengths, columns):
                if c:
                    out.append(c.ljust(l))
                else:
                    out.append(' '*l)

            return ' '.join(out)

        def write_separator():
            """Write a table separator row"""
            sep = ['='*c for c in column_lengths]
            return write_row(*sep)

        out.append(write_separator())
        out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
        out.append(write_separator())
        for row in table:
            out.append(write_row(
                row['name'],
                row['extensions'],
                row['aliases'],
                f':class:`~{row["class"]}`'))
        out.append(write_separator())

        return '\n'.join(out)

    def document_lexers(self):
        """Generate ``.. class::`` blocks for every lexer, grouped by
        module, using the templates defined at module level."""
        from pygments.lexers._mapping import LEXERS
        out = []
        modules = {}
        moduledocstrings = {}
        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            if not cls.__doc__:
                print("Warning: %s does not have a docstring." % classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            modules.setdefault(module, []).append((
                classname,
                ', '.join(data[2]) or 'None',
                # Fix: escape "_" as "\_" (see document_lexers_overview)
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
                ', '.join(data[4]) or 'None',
                docstring))
            if module not in moduledocstrings:
                moddoc = mod.__doc__
                if isinstance(moddoc, bytes):
                    moddoc = moddoc.decode('utf8')
                moduledocstrings[module] = moddoc

        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
            if moduledocstrings[module] is None:
                raise Exception("Missing docstring for %s" % (module,))
            # the fifth line of the standard pygments module docstring
            # holds the one-line summary (name, squiggle rule, blank,
            # summary)
            heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
            out.append(MODULEDOC % (module, heading, '-'*len(heading)))
            for data in lexers:
                out.append(LEXERDOC % data)

        return ''.join(out)

    def document_formatters(self):
        """Generate ``.. class::`` blocks for every formatter."""
        from pygments.formatters import FORMATTERS

        out = []
        for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            heading = cls.__name__
            out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
                                   ', '.join(data[3]).replace('*', '\\*') or 'None',
                                   docstring))
        return ''.join(out)

    def document_filters(self):
        """Generate ``.. class::`` blocks for every filter."""
        from pygments.filters import FILTERS

        out = []
        for name, cls in FILTERS.items():
            self.filenames.add(sys.modules[cls.__module__].__file__)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            out.append(FILTERDOC % (cls.__name__, name, docstring))
        return ''.join(out)
+
+
def setup(app):
    """Sphinx extension entry point: register the ``pygmentsdoc`` directive."""
    app.add_directive('pygmentsdoc', PygmentsDoc)
diff --git a/pygments/style.py b/pygments/style.py
new file mode 100644
index 0000000..8faa4dc
--- /dev/null
+++ b/pygments/style.py
@@ -0,0 +1,197 @@
+"""
+ pygments.style
+ ~~~~~~~~~~~~~~
+
+ Basic style object.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.token import Token, STANDARD_TYPES
+
# Default mapping of ansixxx to RGB colors.
_ansimap = {
    # dark variants
    'ansiblack': '000000',
    'ansired': '7f0000',
    'ansigreen': '007f00',
    # NOTE(review): '7f7fe0' looks like a typo for '7f7f00' (dark
    # yellow) — confirm against rendered output before changing.
    'ansiyellow': '7f7fe0',
    'ansiblue': '00007f',
    'ansimagenta': '7f007f',
    'ansicyan': '007f7f',
    'ansigray': 'e5e5e5',
    # bright variants (the "ansibright*" names, plus white)
    'ansibrightblack': '555555',
    'ansibrightred': 'ff0000',
    'ansibrightgreen': '00ff00',
    'ansibrightyellow': 'ffff00',
    'ansibrightblue': '0000ff',
    'ansibrightmagenta': 'ff00ff',
    'ansibrightcyan': '00ffff',
    'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
    # old dark names -> current dark names
    '#ansiblack': 'ansiblack',
    '#ansidarkred': 'ansired',
    '#ansidarkgreen': 'ansigreen',
    '#ansibrown': 'ansiyellow',
    '#ansidarkblue': 'ansiblue',
    '#ansipurple': 'ansimagenta',
    '#ansiteal': 'ansicyan',
    '#ansilightgray': 'ansigray',
    # old "normal" names -> current bright names
    '#ansidarkgray': 'ansibrightblack',
    '#ansired': 'ansibrightred',
    '#ansigreen': 'ansibrightgreen',
    '#ansiyellow': 'ansibrightyellow',
    '#ansiblue': 'ansibrightblue',
    '#ansifuchsia': 'ansibrightmagenta',
    '#ansiturquoise': 'ansibrightcyan',
    '#ansiwhite': 'ansiwhite',
}
# the set of valid ansi color names accepted in style definitions
ansicolors = set(_ansimap)
+
+
class StyleMeta(type):
    """Metaclass for `Style`.

    At class-creation time it parses the human-readable ``styles``
    mapping (e.g. ``'bold #f00 bg:#fff'``) into ``_styles``, a dict
    from token type to a 9-element list:

        [color, bold, italic, underline, bgcolor, border, roman, sans, mono]

    Child token types start from a copy of their parent's parsed list
    unless they specify ``noinherit``.
    """

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        # make sure every standard token type has at least an empty entry
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            # Normalize a single color spec: an ansi color name,
            # '#rrggbb', '#rgb' (expanded), the empty string, or a CSS
            # var()/calc() expression (passed through unchanged).
            if text in ansicolors:
                return text
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    # expand shorthand '#rgb' to 'rrggbb'
                    return col[0] * 2 + col[1] * 2 + col[2] * 2
            elif text == '':
                return ''
            elif text.startswith('var') or text.startswith('calc'):
                return text
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        # ttype.split() presumably yields the token hierarchy from the
        # root down (so parents are parsed before children) — verify
        # against pygments.token.
        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    # no parent entry: start from a blank definition
                    # (the ``token is None`` arm looks unreachable here;
                    # kept as-is)
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    # restart from the root style rather than the parent
                    ndef = _styles[Token][:]
                else:
                    # copy the parent's parsed style as the baseline
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        # anything else is treated as the foreground color
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        """Return the style for ``token`` as a dict, resolving ansi
        color names (and deprecated ``#ansi*`` aliases) to RGB."""
        t = cls._styles[token]
        ansicolor = bgansicolor = None
        color = t[0]
        if color in _deprecated_ansicolors:
            color = _deprecated_ansicolors[color]
        if color in ansicolors:
            ansicolor = color
            color = _ansimap[color]
        bgcolor = t[4]
        if bgcolor in _deprecated_ansicolors:
            bgcolor = _deprecated_ansicolors[bgcolor]
        if bgcolor in ansicolors:
            bgansicolor = bgcolor
            bgcolor = _ansimap[bgcolor]

        return {
            'color': color or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': bgcolor or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
            'ansicolor': ansicolor,
            'bgansicolor': bgansicolor,
        }

    def list_styles(cls):
        """Return ``[(token, style_dict), ...]`` for all known tokens."""
        return list(cls)

    def styles_token(cls, ttype):
        """Return whether ``ttype`` has a parsed style entry."""
        return ttype in cls._styles

    def __iter__(cls):
        # iterating a style class yields (token, style_dict) pairs
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)
+
+
class Style(metaclass=StyleMeta):
    """Base class for all Pygments styles.

    Subclasses override ``styles`` (and optionally the color attributes
    below); `StyleMeta` parses the definitions at class-creation time.
    """

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: line number font color
    line_number_color = 'inherit'

    #: line number background color
    line_number_background_color = 'transparent'

    #: special line number font color
    line_number_special_color = '#000000'

    #: special line number background color
    line_number_special_background_color = '#ffffc0'

    #: Style definitions for individual token types.
    styles = {}

    # Attribute for styles defined within Pygments. If set
    # to True, the style is not shown in the style gallery
    # on the website. This is intended for language-specific
    # styles.
    web_style_gallery_exclude = False
diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py
new file mode 100644
index 0000000..be704b4
--- /dev/null
+++ b/pygments/styles/__init__.py
@@ -0,0 +1,97 @@
+"""
+ pygments.styles
+ ~~~~~~~~~~~~~~~
+
+ Contains built-in styles.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.plugin import find_plugin_styles
+from pygments.util import ClassNotFound
+
+
#: Maps style names to 'submodule::classname'; the submodule lives in
#: the ``pygments.styles`` package. `get_style_by_name` resolves these
#: lazily via ``__import__``.
STYLE_MAP = {
    'default': 'default::DefaultStyle',
    'emacs': 'emacs::EmacsStyle',
    'friendly': 'friendly::FriendlyStyle',
    'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
    'colorful': 'colorful::ColorfulStyle',
    'autumn': 'autumn::AutumnStyle',
    'murphy': 'murphy::MurphyStyle',
    'manni': 'manni::ManniStyle',
    'material': 'material::MaterialStyle',
    'monokai': 'monokai::MonokaiStyle',
    'perldoc': 'perldoc::PerldocStyle',
    'pastie': 'pastie::PastieStyle',
    'borland': 'borland::BorlandStyle',
    'trac': 'trac::TracStyle',
    'native': 'native::NativeStyle',
    'fruity': 'fruity::FruityStyle',
    'bw': 'bw::BlackWhiteStyle',
    'vim': 'vim::VimStyle',
    'vs': 'vs::VisualStudioStyle',
    'tango': 'tango::TangoStyle',
    'rrt': 'rrt::RrtStyle',
    'xcode': 'xcode::XcodeStyle',
    'igor': 'igor::IgorStyle',
    'paraiso-light': 'paraiso_light::ParaisoLightStyle',
    'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
    'lovelace': 'lovelace::LovelaceStyle',
    'algol': 'algol::AlgolStyle',
    'algol_nu': 'algol_nu::Algol_NuStyle',
    'arduino': 'arduino::ArduinoStyle',
    'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
    'abap': 'abap::AbapStyle',
    'solarized-dark': 'solarized::SolarizedDarkStyle',
    'solarized-light': 'solarized::SolarizedLightStyle',
    'sas': 'sas::SasStyle',
    'staroffice' : 'staroffice::StarofficeStyle',
    # 'stata' is an alias of 'stata-light' (both resolve to the same class)
    'stata': 'stata_light::StataLightStyle',
    'stata-light': 'stata_light::StataLightStyle',
    'stata-dark': 'stata_dark::StataDarkStyle',
    'inkpot': 'inkpot::InkPotStyle',
    'zenburn': 'zenburn::ZenburnStyle',
    'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
    'gruvbox-light': 'gruvbox::GruvboxLightStyle',
    'dracula': 'dracula::DraculaStyle',
    'one-dark': 'onedark::OneDarkStyle',
    'lilypond' : 'lilypond::LilyPondStyle',
    'nord': 'nord::NordStyle',
    'nord-darker': 'nord::NordDarkerStyle',
    'github-dark': 'gh_dark::GhDarkStyle'
}
+
+
def get_style_by_name(name):
    """Return a style class by its short *name*.

    Builtin styles are resolved through `STYLE_MAP`; otherwise plugin
    styles and drop-in modules inside the ``pygments.styles`` package
    are tried. Raises `ClassNotFound` when nothing matches.
    """
    builtin = ""
    if name in STYLE_MAP:
        mod, cls = STYLE_MAP[name].split('::')
        builtin = "yes"
    else:
        for found_name, style in find_plugin_styles():
            if name == found_name:
                return style
        # perhaps it got dropped into our styles package
        mod, cls = name, name.title() + "Style"

    try:
        mod = __import__('pygments.styles.' + mod, None, None, [cls])
    except ImportError:
        raise ClassNotFound("Could not find style module %r" % mod +
                            (builtin and ", though it should be builtin") + ".")
    try:
        return getattr(mod, cls)
    except AttributeError:
        raise ClassNotFound("Could not find style class %r in style module." % cls)
+
+
def get_all_styles():
    """Return a generator for all styles by name,
    both builtin and plugin."""
    yield from STYLE_MAP
    yield from (found_name for found_name, _ in find_plugin_styles())
diff --git a/pygments/styles/abap.py b/pygments/styles/abap.py
new file mode 100644
index 0000000..61a84e4
--- /dev/null
+++ b/pygments/styles/abap.py
@@ -0,0 +1,28 @@
+"""
+ pygments.styles.abap
+ ~~~~~~~~~~~~~~~~~~~~
+
+ ABAP workbench like style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator
+
+
class AbapStyle(Style):
    """ABAP workbench like style."""

    styles = {
        Comment: 'italic #888',
        Comment.Special: '#888',
        Keyword: '#00f',
        Operator.Word: '#00f',
        Name: '#000',
        Number: '#3af',
        String: '#5a2',

        Error: '#F00',
    }
diff --git a/pygments/styles/algol.py b/pygments/styles/algol.py
new file mode 100644
index 0000000..e2d8796
--- /dev/null
+++ b/pygments/styles/algol.py
@@ -0,0 +1,61 @@
+"""
+ pygments.styles.algol
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Algol publication style.
+
+ This style renders source code for publication of algorithms in
+ scientific papers and academic texts, where its format is frequently used.
+
+ It is based on the style of the revised Algol-60 language report[1].
+
+ o No colours, only black, white and shades of grey are used.
+ o Keywords are rendered in lowercase underline boldface.
+ o Builtins are rendered in lowercase boldface italic.
+ o Docstrings and pragmas are rendered in dark grey boldface.
+ o Library identifiers are rendered in dark grey boldface italic.
+ o Comments are rendered in grey italic.
+
+ To render keywords without underlining, refer to the `Algol_Nu` style.
+
+ For lowercase conversion of keywords and builtins in languages where
+ these are not or might not be lowercase, a supporting lexer is required.
+ The Algol and Modula-2 lexers automatically convert to lowercase whenever
+ this style is selected.
+
+ [1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Operator
+
+
class AlgolStyle(Style):
    """Algol publication style: monochrome, keywords in underlined
    boldface (see the module docstring for the full conventions)."""

    background_color = "#ffffff"

    styles = {
        Comment: "italic #888",
        Comment.Preproc: "bold noitalic #888",
        Comment.Special: "bold noitalic #888",

        Keyword: "underline bold",
        Keyword.Declaration: "italic",

        Name.Builtin: "bold italic",
        Name.Builtin.Pseudo: "bold italic",
        Name.Namespace: "bold italic #666",
        Name.Class: "bold italic #666",
        Name.Function: "bold italic #666",
        Name.Variable: "bold italic #666",
        Name.Constant: "bold italic #666",

        Operator.Word: "bold",

        String: "italic #666",

        Error: "border:#FF0000"
    }
diff --git a/pygments/styles/algol_nu.py b/pygments/styles/algol_nu.py
new file mode 100644
index 0000000..f47884d
--- /dev/null
+++ b/pygments/styles/algol_nu.py
@@ -0,0 +1,61 @@
+"""
+ pygments.styles.algol_nu
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Algol publication style without underlining of keywords.
+
+ This style renders source code for publication of algorithms in
+ scientific papers and academic texts, where its format is frequently used.
+
+ It is based on the style of the revised Algol-60 language report[1].
+
+ o No colours, only black, white and shades of grey are used.
+ o Keywords are rendered in lowercase boldface.
+ o Builtins are rendered in lowercase boldface italic.
+ o Docstrings and pragmas are rendered in dark grey boldface.
+ o Library identifiers are rendered in dark grey boldface italic.
+ o Comments are rendered in grey italic.
+
+ To render keywords with underlining, refer to the `Algol` style.
+
+ For lowercase conversion of keywords and builtins in languages where
+ these are not or might not be lowercase, a supporting lexer is required.
+ The Algol and Modula-2 lexers automatically convert to lowercase whenever
+ this style is selected.
+
+ [1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Operator
+
+
class Algol_NuStyle(Style):
    """Algol publication style without underlining of keywords (see the
    module docstring for the full conventions)."""

    background_color = "#ffffff"

    styles = {
        Comment: "italic #888",
        Comment.Preproc: "bold noitalic #888",
        Comment.Special: "bold noitalic #888",

        Keyword: "bold",
        Keyword.Declaration: "italic",

        Name.Builtin: "bold italic",
        Name.Builtin.Pseudo: "bold italic",
        Name.Namespace: "bold italic #666",
        Name.Class: "bold italic #666",
        Name.Function: "bold italic #666",
        Name.Variable: "bold italic #666",
        Name.Constant: "bold italic #666",

        Operator.Word: "bold",

        String: "italic #666",

        Error: "border:#FF0000"
    }
diff --git a/pygments/styles/arduino.py b/pygments/styles/arduino.py
new file mode 100644
index 0000000..a86ab05
--- /dev/null
+++ b/pygments/styles/arduino.py
@@ -0,0 +1,96 @@
+"""
+ pygments.styles.arduino
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Arduino® Syntax highlighting style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class ArduinoStyle(Style):
+ """
+ The Arduino® language style. This style is designed to highlight the
+ Arduino source code, so expect the best results with it.
+ """
+
+ background_color = "#ffffff"
+
+ styles = {
+ Whitespace: "", # class: 'w'
+ Error: "#a61717", # class: 'err'
+
+ Comment: "#95a5a6", # class: 'c'
+ Comment.Multiline: "", # class: 'cm'
+ Comment.Preproc: "#728E00", # class: 'cp'
+ Comment.Single: "", # class: 'c1'
+ Comment.Special: "", # class: 'cs'
+
+ Keyword: "#728E00", # class: 'k'
+ Keyword.Constant: "#00979D", # class: 'kc'
+ Keyword.Declaration: "", # class: 'kd'
+ Keyword.Namespace: "", # class: 'kn'
+ Keyword.Pseudo: "#00979D", # class: 'kp'
+ Keyword.Reserved: "#00979D", # class: 'kr'
+ Keyword.Type: "#00979D", # class: 'kt'
+
+ Operator: "#728E00", # class: 'o'
+ Operator.Word: "", # class: 'ow'
+
+ Name: "#434f54", # class: 'n'
+ Name.Attribute: "", # class: 'na'
+ Name.Builtin: "#728E00", # class: 'nb'
+ Name.Builtin.Pseudo: "", # class: 'bp'
+ Name.Class: "", # class: 'nc'
+ Name.Constant: "", # class: 'no'
+ Name.Decorator: "", # class: 'nd'
+ Name.Entity: "", # class: 'ni'
+ Name.Exception: "", # class: 'ne'
+ Name.Function: "#D35400", # class: 'nf'
+ Name.Property: "", # class: 'py'
+ Name.Label: "", # class: 'nl'
+ Name.Namespace: "", # class: 'nn'
+ Name.Other: "#728E00", # class: 'nx'
+ Name.Tag: "", # class: 'nt'
+ Name.Variable: "", # class: 'nv'
+ Name.Variable.Class: "", # class: 'vc'
+ Name.Variable.Global: "", # class: 'vg'
+ Name.Variable.Instance: "", # class: 'vi'
+
+ Number: "#8A7B52", # class: 'm'
+ Number.Float: "", # class: 'mf'
+ Number.Hex: "", # class: 'mh'
+ Number.Integer: "", # class: 'mi'
+ Number.Integer.Long: "", # class: 'il'
+ Number.Oct: "", # class: 'mo'
+
+ String: "#7F8C8D", # class: 's'
+ String.Backtick: "", # class: 'sb'
+ String.Char: "", # class: 'sc'
+ String.Doc: "", # class: 'sd'
+ String.Double: "", # class: 's2'
+ String.Escape: "", # class: 'se'
+ String.Heredoc: "", # class: 'sh'
+ String.Interpol: "", # class: 'si'
+ String.Other: "", # class: 'sx'
+ String.Regex: "", # class: 'sr'
+ String.Single: "", # class: 's1'
+ String.Symbol: "", # class: 'ss'
+
+ Generic: "", # class: 'g'
+ Generic.Deleted: "", # class: 'gd',
+ Generic.Emph: "", # class: 'ge'
+ Generic.Error: "", # class: 'gr'
+ Generic.Heading: "", # class: 'gh'
+ Generic.Inserted: "", # class: 'gi'
+ Generic.Output: "", # class: 'go'
+ Generic.Prompt: "", # class: 'gp'
+ Generic.Strong: "", # class: 'gs'
+ Generic.Subheading: "", # class: 'gu'
+ Generic.Traceback: "", # class: 'gt'
+ }
diff --git a/pygments/styles/autumn.py b/pygments/styles/autumn.py
new file mode 100644
index 0000000..7310a11
--- /dev/null
+++ b/pygments/styles/autumn.py
@@ -0,0 +1,62 @@
+"""
+ pygments.styles.autumn
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ A colorful style, inspired by the terminal highlighting style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class AutumnStyle(Style):
+ """
+ A colorful style, inspired by the terminal highlighting style.
+ """
+
+ styles = {
+ Whitespace: '#bbbbbb',
+
+ Comment: 'italic #aaaaaa',
+ Comment.Preproc: 'noitalic #4c8317',
+ Comment.Special: 'italic #0000aa',
+
+ Keyword: '#0000aa',
+ Keyword.Type: '#00aaaa',
+
+ Operator.Word: '#0000aa',
+
+ Name.Builtin: '#00aaaa',
+ Name.Function: '#00aa00',
+ Name.Class: 'underline #00aa00',
+ Name.Namespace: 'underline #00aaaa',
+ Name.Variable: '#aa0000',
+ Name.Constant: '#aa0000',
+ Name.Entity: 'bold #800',
+ Name.Attribute: '#1e90ff',
+ Name.Tag: 'bold #1e90ff',
+ Name.Decorator: '#888888',
+
+ String: '#aa5500',
+ String.Symbol: '#0000aa',
+ String.Regex: '#009999',
+
+ Number: '#009999',
+
+ Generic.Heading: 'bold #000080',
+ Generic.Subheading: 'bold #800080',
+ Generic.Deleted: '#aa0000',
+ Generic.Inserted: '#00aa00',
+ Generic.Error: '#aa0000',
+ Generic.Emph: 'italic',
+ Generic.Strong: 'bold',
+ Generic.Prompt: '#555555',
+ Generic.Output: '#888888',
+ Generic.Traceback: '#aa0000',
+
+ Error: '#F00 bg:#FAA'
+ }
diff --git a/pygments/styles/borland.py b/pygments/styles/borland.py
new file mode 100644
index 0000000..84cde2b
--- /dev/null
+++ b/pygments/styles/borland.py
@@ -0,0 +1,48 @@
+"""
+ pygments.styles.borland
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to the style used in the Borland IDEs.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class BorlandStyle(Style):
+ """
+ Style similar to the style used in the Borland IDEs.
+ """
+
+ styles = {
+ Whitespace: '#bbbbbb',
+
+ Comment: 'italic #008800',
+ Comment.Preproc: 'noitalic #008080',
+ Comment.Special: 'noitalic bold',
+
+ String: '#0000FF',
+ String.Char: '#800080',
+ Number: '#0000FF',
+ Keyword: 'bold #000080',
+ Operator.Word: 'bold',
+ Name.Tag: 'bold #000080',
+ Name.Attribute: '#FF0000',
+
+ Generic.Heading: '#999999',
+ Generic.Subheading: '#aaaaaa',
+ Generic.Deleted: 'bg:#ffdddd #000000',
+ Generic.Inserted: 'bg:#ddffdd #000000',
+ Generic.Error: '#aa0000',
+ Generic.Emph: 'italic',
+ Generic.Strong: 'bold',
+ Generic.Prompt: '#555555',
+ Generic.Output: '#888888',
+ Generic.Traceback: '#aa0000',
+
+ Error: 'bg:#e3d2d2 #a61717'
+ }
diff --git a/pygments/styles/bw.py b/pygments/styles/bw.py
new file mode 100644
index 0000000..01e4a9c
--- /dev/null
+++ b/pygments/styles/bw.py
@@ -0,0 +1,47 @@
+"""
+ pygments.styles.bw
+ ~~~~~~~~~~~~~~~~~~
+
+ Simple black/white only style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Operator, Generic
+
+
+class BlackWhiteStyle(Style):
+
+ background_color = "#ffffff"
+
+ styles = {
+ Comment: "italic",
+ Comment.Preproc: "noitalic",
+
+ Keyword: "bold",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "nobold",
+
+ Operator.Word: "bold",
+
+ Name.Class: "bold",
+ Name.Namespace: "bold",
+ Name.Exception: "bold",
+ Name.Entity: "bold",
+ Name.Tag: "bold",
+
+ String: "italic",
+ String.Interpol: "bold",
+ String.Escape: "bold",
+
+ Generic.Heading: "bold",
+ Generic.Subheading: "bold",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold",
+
+ Error: "border:#FF0000"
+ }
diff --git a/pygments/styles/colorful.py b/pygments/styles/colorful.py
new file mode 100644
index 0000000..0ce69e4
--- /dev/null
+++ b/pygments/styles/colorful.py
@@ -0,0 +1,78 @@
+"""
+ pygments.styles.colorful
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A colorful style, inspired by CodeRay.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class ColorfulStyle(Style):
+ """
+ A colorful style, inspired by CodeRay.
+ """
+
+ styles = {
+ Whitespace: "#bbbbbb",
+
+ Comment: "#888",
+ Comment.Preproc: "#579",
+ Comment.Special: "bold #cc0000",
+
+ Keyword: "bold #080",
+ Keyword.Pseudo: "#038",
+ Keyword.Type: "#339",
+
+ Operator: "#333",
+ Operator.Word: "bold #000",
+
+ Name.Builtin: "#007020",
+ Name.Function: "bold #06B",
+ Name.Class: "bold #B06",
+ Name.Namespace: "bold #0e84b5",
+ Name.Exception: "bold #F00",
+ Name.Variable: "#963",
+ Name.Variable.Instance: "#33B",
+ Name.Variable.Class: "#369",
+ Name.Variable.Global: "bold #d70",
+ Name.Constant: "bold #036",
+ Name.Label: "bold #970",
+ Name.Entity: "bold #800",
+ Name.Attribute: "#00C",
+ Name.Tag: "#070",
+ Name.Decorator: "bold #555",
+
+ String: "bg:#fff0f0",
+ String.Char: "#04D bg:",
+ String.Doc: "#D42 bg:",
+ String.Interpol: "bg:#eee",
+ String.Escape: "bold #666",
+ String.Regex: "bg:#fff0ff #000",
+ String.Symbol: "#A60 bg:",
+ String.Other: "#D20",
+
+ Number: "bold #60E",
+ Number.Integer: "bold #00D",
+ Number.Float: "bold #60E",
+ Number.Hex: "bold #058",
+ Number.Oct: "bold #40E",
+
+ Generic.Heading: "bold #000080",
+ Generic.Subheading: "bold #800080",
+ Generic.Deleted: "#A00000",
+ Generic.Inserted: "#00A000",
+ Generic.Error: "#FF0000",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #c65d09",
+ Generic.Output: "#888",
+ Generic.Traceback: "#04D",
+
+ Error: "#F00 bg:#FAA"
+ }
diff --git a/pygments/styles/default.py b/pygments/styles/default.py
new file mode 100644
index 0000000..d51dbe8
--- /dev/null
+++ b/pygments/styles/default.py
@@ -0,0 +1,71 @@
+"""
+ pygments.styles.default
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ The default highlighting style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class DefaultStyle(Style):
+ """
+ The default style (inspired by Emacs 22).
+ """
+
+ background_color = "#f8f8f8"
+
+ styles = {
+ Whitespace: "#bbbbbb",
+ Comment: "italic #3D7B7B",
+ Comment.Preproc: "noitalic #9C6500",
+
+ #Keyword: "bold #AA22FF",
+ Keyword: "bold #008000",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "nobold #B00040",
+
+ Operator: "#666666",
+ Operator.Word: "bold #AA22FF",
+
+ Name.Builtin: "#008000",
+ Name.Function: "#0000FF",
+ Name.Class: "bold #0000FF",
+ Name.Namespace: "bold #0000FF",
+ Name.Exception: "bold #CB3F38",
+ Name.Variable: "#19177C",
+ Name.Constant: "#880000",
+ Name.Label: "#767600",
+ Name.Entity: "bold #717171",
+ Name.Attribute: "#687822",
+ Name.Tag: "bold #008000",
+ Name.Decorator: "#AA22FF",
+
+ String: "#BA2121",
+ String.Doc: "italic",
+ String.Interpol: "bold #A45A77",
+ String.Escape: "bold #AA5D1F",
+ String.Regex: "#A45A77",
+ #String.Symbol: "#B8860B",
+ String.Symbol: "#19177C",
+ String.Other: "#008000",
+ Number: "#666666",
+
+ Generic.Heading: "bold #000080",
+ Generic.Subheading: "bold #800080",
+ Generic.Deleted: "#A00000",
+ Generic.Inserted: "#008400",
+ Generic.Error: "#E40000",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #000080",
+ Generic.Output: "#717171",
+ Generic.Traceback: "#04D",
+
+ Error: "border:#FF0000"
+ }
diff --git a/pygments/styles/dracula.py b/pygments/styles/dracula.py
new file mode 100644
index 0000000..9c2a7d1
--- /dev/null
+++ b/pygments/styles/dracula.py
@@ -0,0 +1,102 @@
+"""
+ pygments.styles.dracula
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Pygments version of `Dracula` from https://github.com/dracula/dracula-theme.
+
+ Based on the Dracula Theme for pygments by Chris Bracco.
+ See https://github.com/dracula/pygments/tree/fee9ed5613d1086bc01b9d0a5a0e9867a009f571
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Literal, \
+ Number, Operator, Other, Punctuation, Text, Generic, Whitespace
+
+
+class DraculaStyle(Style):
+
+ background_color = "#282a36"
+ highlight_color = "#44475a"
+ line_number_color = "#f1fa8c"
+ line_number_background_color = "#44475a"
+ line_number_special_color = "#50fa7b"
+ line_number_special_background_color = "#6272a4"
+
+ styles = {
+ Whitespace: "#f8f8f2",
+
+ Comment: "#6272a4",
+ Comment.Hashbang: "#6272a4",
+ Comment.Multiline: "#6272a4",
+ Comment.Preproc: "#ff79c6",
+ Comment.Single: "#6272a4",
+ Comment.Special: "#6272a4",
+
+ Generic: "#f8f8f2",
+ Generic.Deleted: "#8b080b",
+ Generic.Emph: "#f8f8f2 underline",
+ Generic.Error: "#f8f8f2",
+ Generic.Heading: "#f8f8f2 bold",
+ Generic.Inserted: "#f8f8f2 bold",
+ Generic.Output: "#44475a",
+ Generic.Prompt: "#f8f8f2",
+ Generic.Strong: "#f8f8f2",
+ Generic.Subheading: "#f8f8f2 bold",
+ Generic.Traceback: "#f8f8f2",
+
+ Error: "#f8f8f2",
+ Keyword: "#ff79c6",
+ Keyword.Constant: "#ff79c6",
+ Keyword.Declaration: "#8be9fd italic",
+ Keyword.Namespace: "#ff79c6",
+ Keyword.Pseudo: "#ff79c6",
+ Keyword.Reserved: "#ff79c6",
+ Keyword.Type: "#8be9fd",
+ Literal: "#f8f8f2",
+ Literal.Date: "#f8f8f2",
+ Name: "#f8f8f2",
+ Name.Attribute: "#50fa7b",
+ Name.Builtin: "#8be9fd italic",
+ Name.Builtin.Pseudo: "#f8f8f2",
+ Name.Class: "#50fa7b",
+ Name.Constant: "#f8f8f2",
+ Name.Decorator: "#f8f8f2",
+ Name.Entity: "#f8f8f2",
+ Name.Exception: "#f8f8f2",
+ Name.Function: "#50fa7b",
+ Name.Label: "#8be9fd italic",
+ Name.Namespace: "#f8f8f2",
+ Name.Other: "#f8f8f2",
+ Name.Tag: "#ff79c6",
+ Name.Variable: "#8be9fd italic",
+ Name.Variable.Class: "#8be9fd italic",
+ Name.Variable.Global: "#8be9fd italic",
+ Name.Variable.Instance: "#8be9fd italic",
+ Number: "#ffb86c",
+ Number.Bin: "#ffb86c",
+ Number.Float: "#ffb86c",
+ Number.Hex: "#ffb86c",
+ Number.Integer: "#ffb86c",
+ Number.Integer.Long: "#ffb86c",
+ Number.Oct: "#ffb86c",
+ Operator: "#ff79c6",
+ Operator.Word: "#ff79c6",
+ Other: "#f8f8f2",
+ Punctuation: "#f8f8f2",
+ String: "#bd93f9",
+ String.Backtick: "#bd93f9",
+ String.Char: "#bd93f9",
+ String.Doc: "#bd93f9",
+ String.Double: "#bd93f9",
+ String.Escape: "#bd93f9",
+ String.Heredoc: "#bd93f9",
+ String.Interpol: "#bd93f9",
+ String.Other: "#bd93f9",
+ String.Regex: "#bd93f9",
+ String.Single: "#bd93f9",
+ String.Symbol: "#bd93f9",
+ Text: "#f8f8f2",
+ }
diff --git a/pygments/styles/emacs.py b/pygments/styles/emacs.py
new file mode 100644
index 0000000..6f46b47
--- /dev/null
+++ b/pygments/styles/emacs.py
@@ -0,0 +1,70 @@
+"""
+ pygments.styles.emacs
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ A highlighting style for Pygments, inspired by Emacs.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class EmacsStyle(Style):
+ """
+ The default style (inspired by Emacs 22).
+ """
+
+ background_color = "#f8f8f8"
+
+ styles = {
+ Whitespace: "#bbbbbb",
+ Comment: "italic #008800",
+ Comment.Preproc: "noitalic",
+ Comment.Special: "noitalic bold",
+
+ Keyword: "bold #AA22FF",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "bold #00BB00",
+
+ Operator: "#666666",
+ Operator.Word: "bold #AA22FF",
+
+ Name.Builtin: "#AA22FF",
+ Name.Function: "#00A000",
+ Name.Class: "#0000FF",
+ Name.Namespace: "bold #0000FF",
+ Name.Exception: "bold #D2413A",
+ Name.Variable: "#B8860B",
+ Name.Constant: "#880000",
+ Name.Label: "#A0A000",
+ Name.Entity: "bold #999999",
+ Name.Attribute: "#BB4444",
+ Name.Tag: "bold #008000",
+ Name.Decorator: "#AA22FF",
+
+ String: "#BB4444",
+ String.Doc: "italic",
+ String.Interpol: "bold #BB6688",
+ String.Escape: "bold #BB6622",
+ String.Regex: "#BB6688",
+ String.Symbol: "#B8860B",
+ String.Other: "#008000",
+ Number: "#666666",
+
+ Generic.Heading: "bold #000080",
+ Generic.Subheading: "bold #800080",
+ Generic.Deleted: "#A00000",
+ Generic.Inserted: "#00A000",
+ Generic.Error: "#FF0000",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #000080",
+ Generic.Output: "#888",
+ Generic.Traceback: "#04D",
+
+ Error: "border:#FF0000"
+ }
diff --git a/pygments/styles/friendly.py b/pygments/styles/friendly.py
new file mode 100644
index 0000000..27a4f6e
--- /dev/null
+++ b/pygments/styles/friendly.py
@@ -0,0 +1,71 @@
+"""
+ pygments.styles.friendly
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A modern style based on the VIM pyte theme.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class FriendlyStyle(Style):
+ """
+ A modern style based on the VIM pyte theme.
+ """
+
+ background_color = "#f0f0f0"
+ line_number_color = "#666666"
+
+ styles = {
+ Whitespace: "#bbbbbb",
+ Comment: "italic #60a0b0",
+ Comment.Preproc: "noitalic #007020",
+ Comment.Special: "noitalic bg:#fff0f0",
+
+ Keyword: "bold #007020",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "nobold #902000",
+
+ Operator: "#666666",
+ Operator.Word: "bold #007020",
+
+ Name.Builtin: "#007020",
+ Name.Function: "#06287e",
+ Name.Class: "bold #0e84b5",
+ Name.Namespace: "bold #0e84b5",
+ Name.Exception: "#007020",
+ Name.Variable: "#bb60d5",
+ Name.Constant: "#60add5",
+ Name.Label: "bold #002070",
+ Name.Entity: "bold #d55537",
+ Name.Attribute: "#4070a0",
+ Name.Tag: "bold #062873",
+ Name.Decorator: "bold #555555",
+
+ String: "#4070a0",
+ String.Doc: "italic",
+ String.Interpol: "italic #70a0d0",
+ String.Escape: "bold #4070a0",
+ String.Regex: "#235388",
+ String.Symbol: "#517918",
+ String.Other: "#c65d09",
+ Number: "#40a070",
+
+ Generic.Heading: "bold #000080",
+ Generic.Subheading: "bold #800080",
+ Generic.Deleted: "#A00000",
+ Generic.Inserted: "#00A000",
+ Generic.Error: "#FF0000",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #c65d09",
+ Generic.Output: "#888",
+ Generic.Traceback: "#04D",
+
+ Error: "border:#FF0000"
+ }
diff --git a/pygments/styles/friendly_grayscale.py b/pygments/styles/friendly_grayscale.py
new file mode 100644
index 0000000..76201f2
--- /dev/null
+++ b/pygments/styles/friendly_grayscale.py
@@ -0,0 +1,75 @@
+"""
+ pygments.styles.friendly_grayscale
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A style based on friendly style.
+ The color values of the friendly style have been converted to grayscale
+ using the luminosity value calculated by
+ http://www.workwithcolor.com/color-converter-01.htm
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
+class FriendlyGrayscaleStyle(Style):
+ """
+ A modern grayscale style based on the friendly style.
+
+ .. versionadded:: 2.11
+ """
+
+ background_color = "#f0f0f0"
+
+ styles = {
+ Whitespace: "#bbbbbb",
+ Comment: "italic #959595",
+ Comment.Preproc: "noitalic #575757",
+ Comment.Special: "noitalic bg:#F4F4F4",
+
+ Keyword: "bold #575757",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "nobold #4F4F4F",
+
+ Operator: "#666666",
+ Operator.Word: "bold #575757",
+
+ Name.Builtin: "#575757",
+ Name.Function: "#3F3F3F",
+ Name.Class: "bold #7E7E7E",
+ Name.Namespace: "bold #7E7E7E",
+ Name.Exception: "#575757",
+ Name.Variable: "#9A9A9A",
+ Name.Constant: "#A5A5A5",
+ Name.Label: "bold #363636",
+ Name.Entity: "bold #848484",
+ Name.Attribute: "#707070",
+ Name.Tag: "bold #3B3B3B",
+ Name.Decorator: "bold #555555",
+
+ String: "#717171",
+ String.Doc: "italic",
+ String.Interpol: "italic #9F9F9F",
+ String.Escape: "bold #717171",
+ String.Regex: "#575757",
+ String.Symbol: "#676767",
+ String.Other: "#7E7E7E",
+ Number: "#888888",
+
+ Generic.Heading: "bold #373737",
+ Generic.Subheading: "bold #5A5A5A",
+ Generic.Deleted: "#545454",
+ Generic.Inserted: "#7D7D7D",
+ Generic.Error: "#898989",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #7E7E7E",
+ Generic.Output: "#888888",
+ Generic.Traceback: "#6D6D6D",
+
+ Error: "border:#898989"
+ }
diff --git a/pygments/styles/fruity.py b/pygments/styles/fruity.py
new file mode 100644
index 0000000..2151565
--- /dev/null
+++ b/pygments/styles/fruity.py
@@ -0,0 +1,41 @@
+"""
+ pygments.styles.fruity
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ pygments version of my "fruity" vim theme.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token, Comment, Name, Keyword, \
+ Generic, Number, String, Whitespace
+
+class FruityStyle(Style):
+ """
+ Pygments version of the "fruity" vim theme.
+ """
+
+ background_color = '#111111'
+ highlight_color = '#333333'
+
+ styles = {
+ Whitespace: '#888888',
+ Token: '#ffffff',
+ Generic.Output: '#444444 bg:#222222',
+ Keyword: '#fb660a bold',
+ Keyword.Pseudo: 'nobold',
+ Number: '#0086f7 bold',
+ Name.Tag: '#fb660a bold',
+ Name.Variable: '#fb660a',
+ Comment: '#008800 bg:#0f140f italic',
+ Name.Attribute: '#ff0086 bold',
+ String: '#0086d2',
+ Name.Function: '#ff0086 bold',
+ Generic.Heading: '#ffffff bold',
+ Keyword.Type: '#cdcaa9 bold',
+ Generic.Subheading: '#ffffff bold',
+ Name.Constant: '#0086d2',
+ Comment.Preproc: '#ff0007 bold'
+ }
diff --git a/pygments/styles/gh_dark.py b/pygments/styles/gh_dark.py
new file mode 100644
index 0000000..4899879
--- /dev/null
+++ b/pygments/styles/gh_dark.py
@@ -0,0 +1,107 @@
+"""
+ pygments.styles.gh_dark
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ GitHub's Dark-Colorscheme based theme for Pygments
+ Colors extracted from https://github.com/primer/primitives
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, Error, Number, Operator, \
+ Generic, Text, Literal, String, Token
+
+
+# vars are defined to match the defs in
+# - [GitHub's VS Code theme](https://github.com/primer/github-vscode-theme) and
+# - [Primer styles](https://github.com/primer/primitives)
+RED_2 = "#ffa198"
+RED_3 = "#ff7b72"
+RED_9 = "#490202"
+ORANGE_2 = "#ffa657"
+ORANGE_3 = "#f0883e"
+GREEN_1 = "#7ee787"
+GREEN_2 = "#56d364"
+GREEN_7 = "#0f5323"
+BLUE_1 = "#a5d6ff"
+BLUE_2 = "#79c0ff"
+PURPLE_2 = "#d2a8ff"
+GRAY_3 = "#8b949e"
+GRAY_4 = "#6e7681"
+FG_SUBTLE = "#6e7681"
+FG_DEFAULT = "#c9d1d9"
+BG_DEFAULT = "#0d1117"
+DANGER_FG = "#f85149"
+
+
+class GhDarkStyle(Style):
+ """
+ GitHub's Dark-Colorscheme based theme for Pygments
+ """
+
+ background_color = BG_DEFAULT
+
+ # has transparency in VS Code theme as `colors.codemirror.activelineBg`
+ highlight_color = GRAY_4
+
+ line_number_special_color = FG_DEFAULT
+ line_number_special_background_color = FG_SUBTLE
+
+ line_number_color = GRAY_4
+ line_number_background_color = BG_DEFAULT
+
+ styles = {
+ Token: FG_DEFAULT,
+
+ Error: DANGER_FG,
+
+ Keyword: RED_3,
+ Keyword.Constant: BLUE_2,
+ Keyword.Pseudo: BLUE_2,
+
+ Name: FG_DEFAULT,
+ Name.Class: "bold "+ORANGE_3,
+ Name.Constant: "bold "+BLUE_2,
+ Name.Decorator: 'bold '+PURPLE_2,
+ Name.Entity: ORANGE_2,
+ Name.Exception: "bold "+ORANGE_3,
+ Name.Function: 'bold '+PURPLE_2,
+ Name.Label: "bold "+BLUE_2,
+ Name.Namespace: RED_3,
+ Name.Property: BLUE_2,
+ Name.Tag: GREEN_1,
+ Name.Variable: BLUE_2,
+
+ Literal: BLUE_1,
+ Literal.Date: BLUE_2,
+ String: BLUE_1,
+ String.Affix: BLUE_2,
+ String.Delimiter: BLUE_2,
+ String.Escape: BLUE_2,
+ String.Heredoc: BLUE_2,
+ String.Regex: BLUE_2,
+ Number: BLUE_1,
+
+ Comment: 'italic '+GRAY_3,
+ Comment.Preproc: "bold " + GRAY_3,
+ Comment.Special: "bold italic " + GRAY_3,
+
+ Operator: 'bold ' + RED_3,
+
+ Generic: FG_DEFAULT,
+ Generic.Deleted: f"bg:{RED_9} {RED_2}",
+ Generic.Emph: "italic",
+ Generic.Error: RED_2,
+ Generic.Heading: "bold "+BLUE_2,
+ Generic.Inserted: f'bg:{GREEN_7} {GREEN_2}',
+ Generic.Output: GRAY_3,
+ Generic.Prompt: GRAY_3,
+ Generic.Strong: "bold",
+ Generic.Subheading: BLUE_2,
+ Generic.Traceback: RED_3,
+ Generic.Underline: "underline",
+
+ Text.Whitespace: FG_SUBTLE,
+ }
diff --git a/pygments/styles/gruvbox.py b/pygments/styles/gruvbox.py
new file mode 100644
index 0000000..24c3983
--- /dev/null
+++ b/pygments/styles/gruvbox.py
@@ -0,0 +1,109 @@
+"""
+ pygments.styles.gruvbox
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ pygments version of the "gruvbox" vim theme.
+ https://github.com/morhetz/gruvbox
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token, Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic
+
+
+class GruvboxDarkStyle(Style):
+ """
+ Pygments version of the "gruvbox" dark vim theme.
+ """
+
+ background_color = '#282828'
+ highlight_color = '#ebdbb2'
+
+ styles = {
+ Token: '#dddddd',
+
+ Comment: 'italic #928374',
+ Comment.PreProc: '#8ec07c',
+ Comment.Special: 'bold italic #ebdbb2',
+
+ Keyword: '#fb4934',
+ Operator.Word: '#fb4934',
+
+ String: '#b8bb26',
+ String.Escape: '#fe8019',
+
+ Number: '#d3869b',
+
+ Name.Builtin: '#fe8019',
+ Name.Variable: '#83a598',
+ Name.Constant: '#d3869b',
+ Name.Class: '#8ec07c',
+ Name.Function: '#8ec07c',
+ Name.Namespace: '#8ec07c',
+ Name.Exception: '#fb4934',
+ Name.Tag: '#8ec07c',
+ Name.Attribute: '#fabd2f',
+ Name.Decorator: '#fb4934',
+
+ Generic.Heading: 'bold #ebdbb2',
+ Generic.Subheading: 'underline #ebdbb2',
+ Generic.Deleted: 'bg:#fb4934 #282828',
+ Generic.Inserted: 'bg:#b8bb26 #282828',
+ Generic.Error: '#fb4934',
+ Generic.Emph: 'italic',
+ Generic.Strong: 'bold',
+ Generic.Prompt: '#a89984',
+ Generic.Output: '#f2e5bc',
+ Generic.Traceback: '#fb4934',
+
+ Error: 'bg:#fb4934 #282828'
+ }
+
+class GruvboxLightStyle(Style):
+ """
+ Pygments version of the "gruvbox" Light vim theme.
+ """
+
+ background_color = '#fbf1c7'
+ highlight_color = '#3c3836'
+
+ styles = {
+ Comment: 'italic #928374',
+ Comment.PreProc: '#427b58',
+ Comment.Special: 'bold italic #3c3836',
+
+ Keyword: '#9d0006',
+ Operator.Word: '#9d0006',
+
+ String: '#79740e',
+ String.Escape: '#af3a03',
+
+ Number: '#8f3f71',
+
+ Name.Builtin: '#af3a03',
+ Name.Variable: '#076678',
+ Name.Constant: '#8f3f71',
+ Name.Class: '#427b58',
+ Name.Function: '#427b58',
+ Name.Namespace: '#427b58',
+ Name.Exception: '#9d0006',
+ Name.Tag: '#427b58',
+ Name.Attribute: '#b57614',
+ Name.Decorator: '#9d0006',
+
+ Generic.Heading: 'bold #3c3836',
+ Generic.Subheading: 'underline #3c3836',
+ Generic.Deleted: 'bg:#9d0006 #fbf1c7',
+ Generic.Inserted: 'bg:#79740e #fbf1c7',
+ Generic.Error: '#9d0006',
+ Generic.Emph: 'italic',
+ Generic.Strong: 'bold',
+ Generic.Prompt: '#7c6f64',
+ Generic.Output: '#32302f',
+ Generic.Traceback: '#9d0006',
+
+ Error: 'bg:#9d0006 #fbf1c7'
+ }
diff --git a/pygments/styles/igor.py b/pygments/styles/igor.py
new file mode 100644
index 0000000..c5fc490
--- /dev/null
+++ b/pygments/styles/igor.py
@@ -0,0 +1,27 @@
+"""
+ pygments.styles.igor
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Igor Pro default style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String
+
+
+class IgorStyle(Style):
+ """
+ Pygments version of the official colors for Igor Pro procedures.
+ """
+
+ styles = {
+ Comment: 'italic #FF0000',
+ Keyword: '#0000FF',
+ Name.Function: '#C34E00',
+ Name.Decorator: '#CC00A3',
+ Name.Class: '#007575',
+ String: '#009C00'
+ }
diff --git a/pygments/styles/inkpot.py b/pygments/styles/inkpot.py
new file mode 100644
index 0000000..cf3769d
--- /dev/null
+++ b/pygments/styles/inkpot.py
@@ -0,0 +1,67 @@
+"""
+ pygments.styles.inkpot
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ A highlighting style for Pygments, inspired by the Inkpot theme for VIM.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Text, Other, Keyword, Name, Comment, String, \
+ Error, Number, Operator, Generic, Whitespace, Punctuation
+
+
+class InkPotStyle(Style):
+
+ background_color = "#1e1e27"
+
+ styles = {
+ Text: "#cfbfad",
+ Other: "#cfbfad",
+ Whitespace: "#434357",
+ Comment: "#cd8b00",
+ Comment.Preproc: "#409090",
+ Comment.PreprocFile: "bg:#404040 #ffcd8b",
+ Comment.Special: "#808bed",
+
+ Keyword: "#808bed",
+ Keyword.Pseudo: "nobold",
+ Keyword.Type: "#ff8bff",
+
+ Operator: "#666666",
+
+ Punctuation: "#cfbfad",
+
+ Name: "#cfbfad",
+ Name.Attribute: "#cfbfad",
+ Name.Builtin.Pseudo: '#ffff00',
+ Name.Builtin: "#808bed",
+ Name.Class: "#ff8bff",
+ Name.Constant: "#409090",
+ Name.Decorator: "#409090",
+ Name.Exception: "#ff0000",
+ Name.Function: "#c080d0",
+ Name.Label: "#808bed",
+ Name.Namespace: "#ff0000",
+ Name.Variable: "#cfbfad",
+
+ String: "bg:#404040 #ffcd8b",
+ String.Doc: "#808bed",
+
+ Number: "#f0ad6d",
+
+ Generic.Heading: "bold #000080",
+ Generic.Subheading: "bold #800080",
+ Generic.Deleted: "#A00000",
+ Generic.Inserted: "#00A000",
+ Generic.Error: "#FF0000",
+ Generic.Emph: "italic",
+ Generic.Strong: "bold",
+ Generic.Prompt: "bold #000080",
+ Generic.Output: "#888",
+ Generic.Traceback: "#04D",
+
+ Error: "bg:#6e2e2e #ffffff"
+ }
diff --git a/pygments/styles/lilypond.py b/pygments/styles/lilypond.py
new file mode 100644
index 0000000..b899674
--- /dev/null
+++ b/pygments/styles/lilypond.py
@@ -0,0 +1,56 @@
+"""
+ pygments.styles.lilypond
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ LilyPond-specific style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token
+
+class LilyPondStyle(Style):
+ """
+ Style for the LilyPond language.
+
+ .. versionadded:: 2.11
+ """
+
+ # Don't show it in the gallery, it's intended for LilyPond
+ # input only and doesn't show good output on Python code.
+ web_style_gallery_exclude = True
+
+ styles = {
+ Token.Text: "",
+ Token.Keyword: "bold",
+ Token.Comment: "italic #A3AAB2",
+ Token.String: "#AB0909",
+ Token.String.Escape: "#C46C6C",
+ Token.String.Symbol: "noinherit",
+ Token.Pitch: "", #"#911520",
+ Token.Number: "#976806", # includes durations
+ # A bare 11 is not distinguishable from a number, so we highlight
+ # the same.
+ Token.ChordModifier: "#976806",
+ Token.Name.Lvalue: "#08547A",
+ Token.Name.BackslashReference: "#08547A",
+ Token.Name.Builtin.MusicCommand: "bold #08547A",
+ Token.Name.Builtin.PaperVariable: "bold #6C5A05",
+ Token.Name.Builtin.HeaderVariable: "bold #6C5A05",
+ Token.Name.Builtin.MusicFunction: "bold #08547A",
+ Token.Name.Builtin.Clef: "bold #08547A",
+ Token.Name.Builtin.Scale: "bold #08547A",
+ Token.Name.Builtin.RepeatType: "#08547A",
+ Token.Name.Builtin.Dynamic: "#68175A",
+ Token.Name.Builtin.Articulation: "#68175A",
+ Token.Name.Builtin.SchemeFunction: "bold #A83401",
+ Token.Name.Builtin.SchemeBuiltin: "bold",
+ Token.Name.Builtin.MarkupCommand: "bold #831E71",
+ Token.Name.Builtin.Context: "bold #038B8B",
+ Token.Name.Builtin.ContextProperty: "#038B8B",
+ Token.Name.Builtin.Grob: "bold #0C7441",
+ Token.Name.Builtin.GrobProperty: "#0C7441",
+ Token.Name.Builtin.Translator: "bold #6200A4",
+ }
diff --git a/pygments/styles/lovelace.py b/pygments/styles/lovelace.py
new file mode 100644
index 0000000..6beedc5
--- /dev/null
+++ b/pygments/styles/lovelace.py
@@ -0,0 +1,94 @@
+"""
+ pygments.styles.lovelace
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lovelace by Miikka Salminen
+
+ Pygments style by Miikka Salminen (https://github.com/miikkas)
+ A desaturated, somewhat subdued style created for the Lovelace interactive
+ learning environment.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Punctuation, Generic, Whitespace
+
+
class LovelaceStyle(Style):
    """
    The style used in Lovelace interactive learning environment. Tries to avoid
    the "angry fruit salad" effect with desaturated and dim colours.
    """
    # Palette. Leading underscore: these are private helpers of the
    # class, not part of the style's public attribute set.
    _KW_BLUE = '#2838b0'
    _NAME_GREEN = '#388038'
    _DOC_ORANGE = '#b85820'
    _OW_PURPLE = '#a848a8'
    _FUN_BROWN = '#785840'
    _STR_RED = '#b83838'
    _CLS_CYAN = '#287088'
    _ESCAPE_LIME = '#709030'
    _LABEL_CYAN = '#289870'
    _EXCEPT_YELLOW = '#908828'

    # Token -> style-string table (colors above, plus bold/italic/etc. flags).
    styles = {
        Whitespace: '#a89028',
        Comment: 'italic #888888',
        Comment.Hashbang: _CLS_CYAN,
        Comment.Multiline: '#888888',
        Comment.Preproc: 'noitalic '+_LABEL_CYAN,

        Keyword: _KW_BLUE,
        Keyword.Constant: 'italic #444444',
        Keyword.Declaration: 'italic',
        Keyword.Type: 'italic',

        Operator: '#666666',
        Operator.Word: _OW_PURPLE,

        Punctuation: '#888888',

        Name.Attribute: _NAME_GREEN,
        Name.Builtin: _NAME_GREEN,
        Name.Builtin.Pseudo: 'italic',
        Name.Class: _CLS_CYAN,
        Name.Constant: _DOC_ORANGE,
        Name.Decorator: _CLS_CYAN,
        Name.Entity: _ESCAPE_LIME,
        Name.Exception: _EXCEPT_YELLOW,
        Name.Function: _FUN_BROWN,
        Name.Function.Magic: _DOC_ORANGE,
        Name.Label: _LABEL_CYAN,
        Name.Namespace: _LABEL_CYAN,
        Name.Tag: _KW_BLUE,
        Name.Variable: '#b04040',
        Name.Variable.Global:_EXCEPT_YELLOW,
        Name.Variable.Magic: _DOC_ORANGE,

        String: _STR_RED,
        String.Affix: '#444444',
        String.Char: _OW_PURPLE,
        String.Delimiter: _DOC_ORANGE,
        String.Doc: 'italic '+_DOC_ORANGE,
        String.Escape: _ESCAPE_LIME,
        String.Interpol: 'underline',
        String.Other: _OW_PURPLE,
        String.Regex: _OW_PURPLE,

        Number: '#444444',

        Generic.Deleted: '#c02828',
        Generic.Emph: 'italic',
        Generic.Error: '#c02828',
        Generic.Heading: '#666666',
        Generic.Subheading: '#444444',
        Generic.Inserted: _NAME_GREEN,
        Generic.Output: '#666666',
        Generic.Prompt: '#444444',
        Generic.Strong: 'bold',
        Generic.Traceback: _KW_BLUE,

        # Errors stand out via background color rather than foreground.
        Error: 'bg:'+_OW_PURPLE,
    }
diff --git a/pygments/styles/manni.py b/pygments/styles/manni.py
new file mode 100644
index 0000000..f5fe876
--- /dev/null
+++ b/pygments/styles/manni.py
@@ -0,0 +1,74 @@
+"""
+ pygments.styles.manni
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ A colorful style, inspired by the terminal highlighting style.
+
+ This is a port of the style used in the `php port`_ of pygments
+ by Manni. The style is called 'default' there.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
class ManniStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    background_color = '#f0f3f3'

    # Token -> style-string table ("bold #rrggbb", "bg:#rrggbb", etc.).
    styles = {
        Whitespace: '#bbbbbb',
        Comment: 'italic #0099FF',
        Comment.Preproc: 'noitalic #009999',
        Comment.Special: 'bold',

        Keyword: 'bold #006699',
        Keyword.Pseudo: 'nobold',
        Keyword.Type: '#007788',

        Operator: '#555555',
        Operator.Word: 'bold #000000',

        Name.Builtin: '#336666',
        Name.Function: '#CC00FF',
        Name.Class: 'bold #00AA88',
        Name.Namespace: 'bold #00CCFF',
        Name.Exception: 'bold #CC0000',
        Name.Variable: '#003333',
        Name.Constant: '#336600',
        Name.Label: '#9999FF',
        Name.Entity: 'bold #999999',
        Name.Attribute: '#330099',
        Name.Tag: 'bold #330099',
        Name.Decorator: '#9999FF',

        String: '#CC3300',
        String.Doc: 'italic',
        String.Interpol: '#AA0000',
        String.Escape: 'bold #CC3300',
        String.Regex: '#33AAAA',
        String.Symbol: '#FFCC33',
        String.Other: '#CC3300',

        Number: '#FF6600',

        Generic.Heading: 'bold #003300',
        Generic.Subheading: 'bold #003300',
        # Diff markers: red-on-pink for deletions, green-on-pale-green
        # for insertions, with a matching border.
        Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
        Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
        Generic.Error: '#FF0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: 'bold #000099',
        Generic.Output: '#AAAAAA',
        Generic.Traceback: '#99CC66',

        Error: 'bg:#FFAAAA #AA0000'
    }
diff --git a/pygments/styles/material.py b/pygments/styles/material.py
new file mode 100644
index 0000000..7f4e750
--- /dev/null
+++ b/pygments/styles/material.py
@@ -0,0 +1,117 @@
+"""
+ pygments.styles.material
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Mimic the Material theme color scheme.
+
+ https://github.com/material-theme/vsc-material-theme
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Escape, \
+ Error, Text, Number, Operator, Generic, Punctuation, Literal
+
class MaterialStyle(Style):
    """
    This style mimics the Material Theme color scheme.
    """
    # Material palette. NOTE(review): these are public class attributes
    # (no leading underscore), so external code may reference them.
    dark_teal = '#263238'
    white= '#FFFFFF'
    black= '#000000'
    red= '#FF5370'
    orange= '#F78C6C'
    yellow= '#FFCB6B'
    green= '#C3E88D'
    cyan= '#89DDFF'
    blue= '#82AAFF'
    paleblue= '#B2CCD6'
    purple= '#C792EA'
    brown= '#C17E70'
    pink= '#F07178'
    violet= '#BB80B3'
    foreground = '#EEFFFF'
    faded = '#546E7A'

    # Overall colors; line numbers share the page background.
    background_color = dark_teal
    highlight_color = '#2C3B41'
    line_number_color = '#37474F'
    line_number_background_color = dark_teal
    line_number_special_color = '#607A86'
    line_number_special_background_color = dark_teal

    # Token -> style-string table built from the palette above.
    styles = {
        Text: foreground,
        Escape: cyan,
        Error: red,

        Keyword: violet,
        Keyword.Constant: cyan,
        Keyword.Declaration: violet,
        Keyword.Namespace: 'italic ' + cyan,
        Keyword.Pseudo: cyan,
        Keyword.Type: violet,

        Name: foreground,
        Name.Attribute: violet,
        Name.Builtin: blue,
        Name.Builtin.Pseudo: cyan,
        Name.Class: yellow,
        Name.Constant: foreground,
        Name.Decorator: blue,
        Name.Entity: cyan,
        Name.Exception: yellow,
        Name.Function: blue,
        Name.Function.Magic: blue,
        Name.Label: blue,
        Name.Property: yellow,
        Name.Namespace: yellow,
        Name.Other: foreground,
        Name.Tag: red,
        Name.Variable: cyan,
        Name.Variable.Class: cyan,
        Name.Variable.Global: cyan,
        Name.Variable.Instance: cyan,
        Name.Variable.Magic: blue,

        Literal: green,
        Literal.Date: green,

        String: green,
        String.Affix: violet,
        String.Backtick: green,
        String.Char: green,
        String.Delimiter: foreground,
        String.Doc: 'italic ' + faded,
        String.Double: green,
        String.Escape: foreground,
        String.Heredoc: green,
        String.Interpol: cyan,
        String.Other: green,
        String.Regex: cyan,
        String.Single: green,
        String.Symbol: cyan,

        Number: orange,

        Operator: cyan,
        Operator.Word: 'italic ' + cyan,

        Punctuation: cyan,

        Comment: 'italic ' + faded,

        Generic: foreground,
        Generic.Deleted: red,
        Generic.Emph: cyan,
        Generic.Error: red,
        Generic.Heading: green,
        Generic.Inserted: green,
        Generic.Output: faded,
        Generic.Prompt: yellow,
        Generic.Strong: red,
        Generic.Subheading: cyan,
        Generic.Traceback: red,
    }
diff --git a/pygments/styles/monokai.py b/pygments/styles/monokai.py
new file mode 100644
index 0000000..610dc0a
--- /dev/null
+++ b/pygments/styles/monokai.py
@@ -0,0 +1,106 @@
+"""
+ pygments.styles.monokai
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Mimic the Monokai color scheme. Based on tango.py.
+
+ http://www.monokai.nl/blog/2006/07/15/textmate-color-theme/
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Token, \
+ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+
class MonokaiStyle(Style):
    """
    This style mimics the Monokai color scheme.
    """

    background_color = "#272822"
    highlight_color = "#49483e"

    # Token -> style-string table. Empty strings mean "inherit from the
    # parent token type"; the Token entry is the document-wide fallback.
    # Trailing comments give the CSS class Pygments emits for each type.
    styles = {
        # No corresponding class for the following:
        Token: "#f8f8f2", # class: ''
        Whitespace: "", # class: 'w'
        Error: "#960050 bg:#1e0010", # class: 'err'
        Other: "", # class 'x'

        Comment: "#75715e", # class: 'c'
        Comment.Multiline: "", # class: 'cm'
        Comment.Preproc: "", # class: 'cp'
        Comment.Single: "", # class: 'c1'
        Comment.Special: "", # class: 'cs'

        Keyword: "#66d9ef", # class: 'k'
        Keyword.Constant: "", # class: 'kc'
        Keyword.Declaration: "", # class: 'kd'
        Keyword.Namespace: "#f92672", # class: 'kn'
        Keyword.Pseudo: "", # class: 'kp'
        Keyword.Reserved: "", # class: 'kr'
        Keyword.Type: "", # class: 'kt'

        Operator: "#f92672", # class: 'o'
        Operator.Word: "", # class: 'ow' - like keywords

        Punctuation: "#f8f8f2", # class: 'p'

        Name: "#f8f8f2", # class: 'n'
        Name.Attribute: "#a6e22e", # class: 'na' - to be revised
        Name.Builtin: "", # class: 'nb'
        Name.Builtin.Pseudo: "", # class: 'bp'
        Name.Class: "#a6e22e", # class: 'nc' - to be revised
        Name.Constant: "#66d9ef", # class: 'no' - to be revised
        Name.Decorator: "#a6e22e", # class: 'nd' - to be revised
        Name.Entity: "", # class: 'ni'
        Name.Exception: "#a6e22e", # class: 'ne'
        Name.Function: "#a6e22e", # class: 'nf'
        Name.Property: "", # class: 'py'
        Name.Label: "", # class: 'nl'
        Name.Namespace: "", # class: 'nn' - to be revised
        Name.Other: "#a6e22e", # class: 'nx'
        Name.Tag: "#f92672", # class: 'nt' - like a keyword
        Name.Variable: "", # class: 'nv' - to be revised
        Name.Variable.Class: "", # class: 'vc' - to be revised
        Name.Variable.Global: "", # class: 'vg' - to be revised
        Name.Variable.Instance: "", # class: 'vi' - to be revised

        Number: "#ae81ff", # class: 'm'
        Number.Float: "", # class: 'mf'
        Number.Hex: "", # class: 'mh'
        Number.Integer: "", # class: 'mi'
        Number.Integer.Long: "", # class: 'il'
        Number.Oct: "", # class: 'mo'

        Literal: "#ae81ff", # class: 'l'
        Literal.Date: "#e6db74", # class: 'ld'

        String: "#e6db74", # class: 's'
        String.Backtick: "", # class: 'sb'
        String.Char: "", # class: 'sc'
        String.Doc: "", # class: 'sd' - like a comment
        String.Double: "", # class: 's2'
        String.Escape: "#ae81ff", # class: 'se'
        String.Heredoc: "", # class: 'sh'
        String.Interpol: "", # class: 'si'
        String.Other: "", # class: 'sx'
        String.Regex: "", # class: 'sr'
        String.Single: "", # class: 's1'
        String.Symbol: "", # class: 'ss'


        Generic: "", # class: 'g'
        Generic.Deleted: "#f92672", # class: 'gd'
        Generic.Emph: "italic", # class: 'ge'
        Generic.Error: "", # class: 'gr'
        Generic.Heading: "", # class: 'gh'
        Generic.Inserted: "#a6e22e", # class: 'gi'
        Generic.Output: "#66d9ef", # class: 'go'
        Generic.Prompt: "bold #f92672", # class: 'gp'
        Generic.Strong: "bold", # class: 'gs'
        Generic.Subheading: "#75715e", # class: 'gu'
        Generic.Traceback: "", # class: 'gt'
    }
diff --git a/pygments/styles/murphy.py b/pygments/styles/murphy.py
new file mode 100644
index 0000000..67f4dda
--- /dev/null
+++ b/pygments/styles/murphy.py
@@ -0,0 +1,77 @@
+"""
+ pygments.styles.murphy
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Murphy's style from CodeRay.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
class MurphyStyle(Style):
    """
    Murphy's style from CodeRay.
    """

    # Token -> style-string table. Uses 3-digit CSS-style hex shorthand
    # (e.g. "#666") alongside 6-digit colors; a bare "bg:" clears any
    # inherited background.
    styles = {
        Whitespace: "#bbbbbb",
        Comment: "#666 italic",
        Comment.Preproc: "#579 noitalic",
        Comment.Special: "#c00 bold",

        Keyword: "bold #289",
        Keyword.Pseudo: "#08f",
        Keyword.Type: "#66f",

        Operator: "#333",
        Operator.Word: "bold #000",

        Name.Builtin: "#072",
        Name.Function: "bold #5ed",
        Name.Class: "bold #e9e",
        Name.Namespace: "bold #0e84b5",
        Name.Exception: "bold #F00",
        Name.Variable: "#036",
        Name.Variable.Instance: "#aaf",
        Name.Variable.Class: "#ccf",
        Name.Variable.Global: "#f84",
        Name.Constant: "bold #5ed",
        Name.Label: "bold #970",
        Name.Entity: "#800",
        Name.Attribute: "#007",
        Name.Tag: "#070",
        Name.Decorator: "bold #555",

        # Strings get a pale-blue background; the sub-types below reset
        # it ("bg:") and recolor the foreground.
        String: "bg:#e0e0ff",
        String.Char: "#88F bg:",
        String.Doc: "#D42 bg:",
        String.Interpol: "bg:#eee",
        String.Escape: "bold #666",
        String.Regex: "bg:#e0e0ff #000",
        String.Symbol: "#fc8 bg:",
        String.Other: "#f88",

        Number: "bold #60E",
        Number.Integer: "bold #66f",
        Number.Float: "bold #60E",
        Number.Hex: "bold #058",
        Number.Oct: "bold #40E",

        Generic.Heading: "bold #000080",
        Generic.Subheading: "bold #800080",
        Generic.Deleted: "#A00000",
        Generic.Inserted: "#00A000",
        Generic.Error: "#FF0000",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold #c65d09",
        Generic.Output: "#888",
        Generic.Traceback: "#04D",

        Error: "#F00 bg:#FAA"
    }
diff --git a/pygments/styles/native.py b/pygments/styles/native.py
new file mode 100644
index 0000000..fa4687c
--- /dev/null
+++ b/pygments/styles/native.py
@@ -0,0 +1,65 @@
+"""
+ pygments.styles.native
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ pygments version of my "native" vim theme.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Token, Whitespace
+
+
class NativeStyle(Style):
    """
    Pygments version of the "native" vim theme.
    """

    # Dark theme: near-black page, light default text (Token below).
    background_color = '#202020'
    highlight_color = '#404040'
    line_number_color = '#aaaaaa'

    # Token -> style-string table; Token is the document-wide fallback.
    styles = {
        Token: '#d0d0d0',
        Whitespace: '#666666',

        Comment: 'italic #ababab',
        Comment.Preproc: 'noitalic bold #cd2828',
        Comment.Special: 'noitalic bold #e50808 bg:#520000',

        Keyword: 'bold #6ebf26',
        Keyword.Pseudo: 'nobold',
        Operator.Word: 'bold #6ebf26',

        String: '#ed9d13',
        String.Other: '#ffa500',

        Number: '#51b2fd',

        Name.Builtin: '#2fbccd',
        Name.Variable: '#40ffff',
        Name.Constant: '#40ffff',
        Name.Class: 'underline #71adff',
        Name.Function: '#71adff',
        Name.Namespace: 'underline #71adff',
        Name.Exception: '#bbbbbb',
        Name.Tag: 'bold #6ebf26',
        Name.Attribute: '#bbbbbb',
        Name.Decorator: '#ffa500',

        Generic.Heading: 'bold #ffffff',
        Generic.Subheading: 'underline #ffffff',
        Generic.Deleted: '#d22323',
        Generic.Inserted: '#589819',
        Generic.Error: '#d22323',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: '#aaaaaa',
        Generic.Output: '#cccccc',
        Generic.Traceback: '#d22323',

        # Light background here despite the dark theme — errors flash.
        Error: 'bg:#e3d2d2 #a61717'
    }
diff --git a/pygments/styles/nord.py b/pygments/styles/nord.py
new file mode 100644
index 0000000..10a8a54
--- /dev/null
+++ b/pygments/styles/nord.py
@@ -0,0 +1,150 @@
+"""
+ pygments.styles.nord
+ ~~~~~~~~~~~~~~~~~~~~
+
+ pygments version of the "nord" theme by Arctic Ice Studio
+ https://www.nordtheme.com/
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Number, \
+ Operator, Generic, Whitespace, Punctuation, Text, Token
+
+
class NordStyle(Style):
    """
    Pygments version of the "nord" theme by Arctic Ice Studio.
    """

    # Special (e.g. hl_lines) line numbers invert the normal scheme.
    line_number_color = "#D8DEE9"
    line_number_background_color = "#242933"
    line_number_special_color = "#242933"
    line_number_special_background_color = "#D8DEE9"

    background_color = "#2E3440"
    highlight_color = "#3B4252"

    # Token -> style-string table; Token is the document-wide fallback.
    styles = {
        Token: "#d8dee9",

        Whitespace: '#d8dee9',
        Punctuation: '#eceff4',

        Comment: 'italic #616e87',
        Comment.Preproc: '#5e81ac',

        Keyword: 'bold #81a1c1',
        Keyword.Pseudo: 'nobold #81a1c1',
        Keyword.Type: 'nobold #81a1c1',

        Operator: 'bold #81a1c1',
        Operator.Word: 'bold #81a1c1',

        Name: '#d8dee9',
        Name.Builtin: '#81a1c1',
        Name.Function: '#88c0d0',
        Name.Class: '#8fbcbb',
        Name.Namespace: '#8fbcbb',
        Name.Exception: '#bf616a',
        Name.Variable: '#d8dee9',
        Name.Constant: '#8fbcbb',
        Name.Entity: '#d08770',
        Name.Attribute: '#8fbcbb',
        Name.Tag: '#81a1c1',
        Name.Decorator: '#d08770',

        String: '#a3be8c',
        String.Doc: '#616e87',
        String.Interpol: '#a3be8c',
        String.Escape: '#ebcb8b',
        String.Regex: '#ebcb8b',
        String.Symbol: '#a3be8c',
        String.Other: '#a3be8c',

        Number: '#b48ead',

        Generic.Heading: 'bold #88c0d0',
        Generic.Subheading: 'bold #88c0d0',
        Generic.Deleted: '#bf616a',
        Generic.Inserted: '#a3be8c',
        Generic.Error: '#bf616a',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: 'bold #616e88',
        Generic.Output: '#d8dee9',
        Generic.Traceback: '#bf616a',

        Error: '#bf616a',
        Text: '#d8dee9',
    }
+
+
class NordDarkerStyle(Style):
    """
    Pygments version of a darker "nord" theme by Arctic Ice Studio
    """

    # NOTE(review): this class intentionally mirrors NordStyle above;
    # only background_color differs (#242933 vs #2E3440). The styles
    # table is a verbatim copy.
    line_number_color = "#D8DEE9"
    line_number_background_color = "#242933"
    line_number_special_color = "#242933"
    line_number_special_background_color = "#D8DEE9"

    background_color = "#242933"
    highlight_color = "#3B4252"

    # Token -> style-string table; Token is the document-wide fallback.
    styles = {
        Token: "#d8dee9",

        Whitespace: '#d8dee9',
        Punctuation: '#eceff4',

        Comment: 'italic #616e87',
        Comment.Preproc: '#5e81ac',

        Keyword: 'bold #81a1c1',
        Keyword.Pseudo: 'nobold #81a1c1',
        Keyword.Type: 'nobold #81a1c1',

        Operator: 'bold #81a1c1',
        Operator.Word: 'bold #81a1c1',

        Name: '#d8dee9',
        Name.Builtin: '#81a1c1',
        Name.Function: '#88c0d0',
        Name.Class: '#8fbcbb',
        Name.Namespace: '#8fbcbb',
        Name.Exception: '#bf616a',
        Name.Variable: '#d8dee9',
        Name.Constant: '#8fbcbb',
        Name.Entity: '#d08770',
        Name.Attribute: '#8fbcbb',
        Name.Tag: '#81a1c1',
        Name.Decorator: '#d08770',

        String: '#a3be8c',
        String.Doc: '#616e87',
        String.Interpol: '#a3be8c',
        String.Escape: '#ebcb8b',
        String.Regex: '#ebcb8b',
        String.Symbol: '#a3be8c',
        String.Other: '#a3be8c',

        Number: '#b48ead',

        Generic.Heading: 'bold #88c0d0',
        Generic.Subheading: 'bold #88c0d0',
        Generic.Deleted: '#bf616a',
        Generic.Inserted: '#a3be8c',
        Generic.Error: '#bf616a',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: 'bold #616e88',
        Generic.Output: '#d8dee9',
        Generic.Traceback: '#bf616a',

        Error: '#bf616a',
        Text: '#d8dee9',
    }
diff --git a/pygments/styles/onedark.py b/pygments/styles/onedark.py
new file mode 100644
index 0000000..21a3ac4
--- /dev/null
+++ b/pygments/styles/onedark.py
@@ -0,0 +1,59 @@
+"""
+ pygments.styles.onedark
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ One Dark Theme for Pygments by Tobias Zoghaib (https://github.com/TobiZog)
+
+ Inspired by one-dark-ui for the code editor Atom
+ (https://atom.io/themes/one-dark-ui).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+ Punctuation, String, Token
+
+
class OneDarkStyle(Style):
    """
    Theme inspired by One Dark Pro for Atom.

    .. versionadded:: 2.11
    """

    background_color = '#282C34'

    # Token -> style-string table; Token is the document-wide fallback.
    styles = {
        Token: '#ABB2BF',

        Punctuation: '#ABB2BF',
        Punctuation.Marker: '#ABB2BF',

        Keyword: '#C678DD',
        Keyword.Constant: '#E5C07B',
        Keyword.Declaration: '#C678DD',
        Keyword.Namespace: '#C678DD',
        Keyword.Reserved: '#C678DD',
        Keyword.Type: '#E5C07B',

        Name: '#E06C75',
        Name.Attribute: '#E06C75',
        Name.Builtin: '#E5C07B',
        Name.Class: '#E5C07B',
        Name.Function: 'bold #61AFEF',
        Name.Function.Magic: 'bold #56B6C2',
        Name.Other: '#E06C75',
        Name.Tag: '#E06C75',
        Name.Decorator: '#61AFEF',
        # Empty string: inherit from Name.Variable / Name.
        Name.Variable.Class: '',

        String: '#98C379',

        Number: '#D19A66',

        Operator: '#56B6C2',

        Comment: '#7F848E'
    }
diff --git a/pygments/styles/paraiso_dark.py b/pygments/styles/paraiso_dark.py
new file mode 100644
index 0000000..06675b7
--- /dev/null
+++ b/pygments/styles/paraiso_dark.py
@@ -0,0 +1,119 @@
+"""
+ pygments.styles.paraiso_dark
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Paraíso (Dark) by Jan T. Sott
+
+ Pygments template by Jan T. Sott (https://github.com/idleberg)
+ Created with Base16 Builder by Chris Kempson
+ (https://github.com/chriskempson/base16-builder).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Text, \
+ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+
+
# Base16 Paraíso (dark) palette, shared by the style class below.
BACKGROUND = "#2f1e2e"
CURRENT_LINE = "#41323f"
SELECTION = "#4f424c"
FOREGROUND = "#e7e9db"
COMMENT = "#776e71"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
+
+
class ParaisoDarkStyle(Style):
    """
    Paraíso (Dark) style, generated from a Base16 builder template.
    """

    background_color = BACKGROUND
    highlight_color = SELECTION

    # Token -> style-string table built from the module-level palette.
    # Trailing comments give the CSS class Pygments emits for each type;
    # empty strings inherit from the parent token type.
    styles = {
        # No corresponding class for the following:
        Text: FOREGROUND, # class: ''
        Whitespace: "", # class: 'w'
        Error: RED, # class: 'err'
        Other: "", # class 'x'

        Comment: COMMENT, # class: 'c'
        Comment.Multiline: "", # class: 'cm'
        Comment.Preproc: "", # class: 'cp'
        Comment.Single: "", # class: 'c1'
        Comment.Special: "", # class: 'cs'

        Keyword: PURPLE, # class: 'k'
        Keyword.Constant: "", # class: 'kc'
        Keyword.Declaration: "", # class: 'kd'
        Keyword.Namespace: AQUA, # class: 'kn'
        Keyword.Pseudo: "", # class: 'kp'
        Keyword.Reserved: "", # class: 'kr'
        Keyword.Type: YELLOW, # class: 'kt'

        Operator: AQUA, # class: 'o'
        Operator.Word: "", # class: 'ow' - like keywords

        Punctuation: FOREGROUND, # class: 'p'

        Name: FOREGROUND, # class: 'n'
        Name.Attribute: BLUE, # class: 'na' - to be revised
        Name.Builtin: "", # class: 'nb'
        Name.Builtin.Pseudo: "", # class: 'bp'
        Name.Class: YELLOW, # class: 'nc' - to be revised
        Name.Constant: RED, # class: 'no' - to be revised
        Name.Decorator: AQUA, # class: 'nd' - to be revised
        Name.Entity: "", # class: 'ni'
        Name.Exception: RED, # class: 'ne'
        Name.Function: BLUE, # class: 'nf'
        Name.Property: "", # class: 'py'
        Name.Label: "", # class: 'nl'
        Name.Namespace: YELLOW, # class: 'nn' - to be revised
        Name.Other: BLUE, # class: 'nx'
        Name.Tag: AQUA, # class: 'nt' - like a keyword
        Name.Variable: RED, # class: 'nv' - to be revised
        Name.Variable.Class: "", # class: 'vc' - to be revised
        Name.Variable.Global: "", # class: 'vg' - to be revised
        Name.Variable.Instance: "", # class: 'vi' - to be revised

        Number: ORANGE, # class: 'm'
        Number.Float: "", # class: 'mf'
        Number.Hex: "", # class: 'mh'
        Number.Integer: "", # class: 'mi'
        Number.Integer.Long: "", # class: 'il'
        Number.Oct: "", # class: 'mo'

        Literal: ORANGE, # class: 'l'
        Literal.Date: GREEN, # class: 'ld'

        String: GREEN, # class: 's'
        String.Backtick: "", # class: 'sb'
        String.Char: FOREGROUND, # class: 'sc'
        String.Doc: COMMENT, # class: 'sd' - like a comment
        String.Double: "", # class: 's2'
        String.Escape: ORANGE, # class: 'se'
        String.Heredoc: "", # class: 'sh'
        String.Interpol: ORANGE, # class: 'si'
        String.Other: "", # class: 'sx'
        String.Regex: "", # class: 'sr'
        String.Single: "", # class: 's1'
        String.Symbol: "", # class: 'ss'

        Generic: "", # class: 'g'
        Generic.Deleted: RED, # class: 'gd'
        Generic.Emph: "italic", # class: 'ge'
        Generic.Error: "", # class: 'gr'
        Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
        Generic.Inserted: GREEN, # class: 'gi'
        Generic.Output: "", # class: 'go'
        Generic.Prompt: "bold " + COMMENT, # class: 'gp'
        Generic.Strong: "bold", # class: 'gs'
        Generic.Subheading: "bold " + AQUA, # class: 'gu'
        Generic.Traceback: "", # class: 'gt'
    }
diff --git a/pygments/styles/paraiso_light.py b/pygments/styles/paraiso_light.py
new file mode 100644
index 0000000..e353daa
--- /dev/null
+++ b/pygments/styles/paraiso_light.py
@@ -0,0 +1,119 @@
+"""
+ pygments.styles.paraiso_light
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Paraíso (Light) by Jan T. Sott
+
+ Pygments template by Jan T. Sott (https://github.com/idleberg)
+ Created with Base16 Builder by Chris Kempson
+ (https://github.com/chriskempson/base16-builder).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, Text, \
+ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+
+
# Base16 Paraíso (light) palette, shared by the style class below.
# Same accent colors as the dark variant; BACKGROUND/FOREGROUND swapped.
BACKGROUND = "#e7e9db"
CURRENT_LINE = "#b9b6b0"
SELECTION = "#a39e9b"
FOREGROUND = "#2f1e2e"
COMMENT = "#8d8687"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
+
+
class ParaisoLightStyle(Style):
    """
    Paraíso (Light) style, generated from a Base16 builder template.
    """

    background_color = BACKGROUND
    highlight_color = SELECTION

    # Token -> style-string table built from the module-level palette.
    # Trailing comments give the CSS class Pygments emits for each type;
    # empty strings inherit from the parent token type.
    styles = {
        # No corresponding class for the following:
        Text: FOREGROUND, # class: ''
        Whitespace: "", # class: 'w'
        Error: RED, # class: 'err'
        Other: "", # class 'x'

        Comment: COMMENT, # class: 'c'
        Comment.Multiline: "", # class: 'cm'
        Comment.Preproc: "", # class: 'cp'
        Comment.Single: "", # class: 'c1'
        Comment.Special: "", # class: 'cs'

        Keyword: PURPLE, # class: 'k'
        Keyword.Constant: "", # class: 'kc'
        Keyword.Declaration: "", # class: 'kd'
        Keyword.Namespace: AQUA, # class: 'kn'
        Keyword.Pseudo: "", # class: 'kp'
        Keyword.Reserved: "", # class: 'kr'
        Keyword.Type: YELLOW, # class: 'kt'

        Operator: AQUA, # class: 'o'
        Operator.Word: "", # class: 'ow' - like keywords

        Punctuation: FOREGROUND, # class: 'p'

        Name: FOREGROUND, # class: 'n'
        Name.Attribute: BLUE, # class: 'na' - to be revised
        Name.Builtin: "", # class: 'nb'
        Name.Builtin.Pseudo: "", # class: 'bp'
        Name.Class: YELLOW, # class: 'nc' - to be revised
        Name.Constant: RED, # class: 'no' - to be revised
        Name.Decorator: AQUA, # class: 'nd' - to be revised
        Name.Entity: "", # class: 'ni'
        Name.Exception: RED, # class: 'ne'
        Name.Function: BLUE, # class: 'nf'
        Name.Property: "", # class: 'py'
        Name.Label: "", # class: 'nl'
        Name.Namespace: YELLOW, # class: 'nn' - to be revised
        Name.Other: BLUE, # class: 'nx'
        Name.Tag: AQUA, # class: 'nt' - like a keyword
        Name.Variable: RED, # class: 'nv' - to be revised
        Name.Variable.Class: "", # class: 'vc' - to be revised
        Name.Variable.Global: "", # class: 'vg' - to be revised
        Name.Variable.Instance: "", # class: 'vi' - to be revised

        Number: ORANGE, # class: 'm'
        Number.Float: "", # class: 'mf'
        Number.Hex: "", # class: 'mh'
        Number.Integer: "", # class: 'mi'
        Number.Integer.Long: "", # class: 'il'
        Number.Oct: "", # class: 'mo'

        Literal: ORANGE, # class: 'l'
        Literal.Date: GREEN, # class: 'ld'

        String: GREEN, # class: 's'
        String.Backtick: "", # class: 'sb'
        String.Char: FOREGROUND, # class: 'sc'
        String.Doc: COMMENT, # class: 'sd' - like a comment
        String.Double: "", # class: 's2'
        String.Escape: ORANGE, # class: 'se'
        String.Heredoc: "", # class: 'sh'
        String.Interpol: ORANGE, # class: 'si'
        String.Other: "", # class: 'sx'
        String.Regex: "", # class: 'sr'
        String.Single: "", # class: 's1'
        String.Symbol: "", # class: 'ss'

        Generic: "", # class: 'g'
        Generic.Deleted: RED, # class: 'gd'
        Generic.Emph: "italic", # class: 'ge'
        Generic.Error: "", # class: 'gr'
        Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
        Generic.Inserted: GREEN, # class: 'gi'
        Generic.Output: "", # class: 'go'
        Generic.Prompt: "bold " + COMMENT, # class: 'gp'
        Generic.Strong: "bold", # class: 'gs'
        Generic.Subheading: "bold " + AQUA, # class: 'gu'
        Generic.Traceback: "", # class: 'gt'
    }
diff --git a/pygments/styles/pastie.py b/pygments/styles/pastie.py
new file mode 100644
index 0000000..de327b3
--- /dev/null
+++ b/pygments/styles/pastie.py
@@ -0,0 +1,72 @@
+"""
+ pygments.styles.pastie
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to the `pastie`_ default style.
+
+ .. _pastie: http://pastie.caboo.se/
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
class PastieStyle(Style):
    """
    Style similar to the pastie default style.
    """

    # Token -> style-string table. Strings get tinted backgrounds
    # ("bg:#fff0f0" etc.) so string regions stand out as blocks.
    styles = {
        Whitespace: '#bbbbbb',
        Comment: '#888888',
        Comment.Preproc: 'bold #cc0000',
        Comment.Special: 'bg:#fff0f0 bold #cc0000',

        String: 'bg:#fff0f0 #dd2200',
        String.Regex: 'bg:#fff0ff #008800',
        String.Other: 'bg:#f0fff0 #22bb22',
        String.Symbol: '#aa6600',
        String.Interpol: '#3333bb',
        String.Escape: '#0044dd',

        Operator.Word: '#008800',

        Keyword: 'bold #008800',
        Keyword.Pseudo: 'nobold',
        Keyword.Type: '#888888',

        Name.Class: 'bold #bb0066',
        Name.Exception: 'bold #bb0066',
        Name.Function: 'bold #0066bb',
        Name.Property: 'bold #336699',
        Name.Namespace: 'bold #bb0066',
        Name.Builtin: '#003388',
        Name.Variable: '#336699',
        Name.Variable.Class: '#336699',
        Name.Variable.Instance: '#3333bb',
        Name.Variable.Global: '#dd7700',
        Name.Constant: 'bold #003366',
        Name.Tag: 'bold #bb0066',
        Name.Attribute: '#336699',
        Name.Decorator: '#555555',
        Name.Label: 'italic #336699',

        Number: 'bold #0000DD',

        Generic.Heading: '#333',
        Generic.Subheading: '#666',
        Generic.Deleted: 'bg:#ffdddd #000000',
        Generic.Inserted: 'bg:#ddffdd #000000',
        Generic.Error: '#aa0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: '#555555',
        Generic.Output: '#888888',
        Generic.Traceback: '#aa0000',

        Error: 'bg:#e3d2d2 #a61717'
    }
diff --git a/pygments/styles/perldoc.py b/pygments/styles/perldoc.py
new file mode 100644
index 0000000..6e415de
--- /dev/null
+++ b/pygments/styles/perldoc.py
@@ -0,0 +1,67 @@
+"""
+ pygments.styles.perldoc
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to the style used in the `perldoc`_ code blocks.
+
+ .. _perldoc: http://perldoc.perl.org/
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
class PerldocStyle(Style):
    """
    Style similar to the style used in the perldoc code blocks.
    """

    background_color = '#eeeedd'

    # Token -> style-string table ("#rrggbb", plus bold/italic/underline).
    styles = {
        Whitespace: '#bbbbbb',
        Comment: '#228B22',
        Comment.Preproc: '#1e889b',
        Comment.Special: '#8B008B bold',

        String: '#CD5555',
        String.Heredoc: '#1c7e71 italic',
        String.Other: '#cb6c20',
        # The original table listed String.Regex twice ('#B452CD',
        # then '#1c7e71'); in a dict literal the last occurrence wins,
        # so only the effective entry is kept.
        String.Regex: '#1c7e71',

        Number: '#B452CD',

        Operator.Word: '#8B008B',

        Keyword: '#8B008B bold',
        Keyword.Type: '#00688B',

        Name.Class: '#008b45 bold',
        Name.Exception: '#008b45 bold',
        Name.Function: '#008b45',
        Name.Namespace: '#008b45 underline',
        Name.Variable: '#00688B',
        Name.Constant: '#00688B',
        Name.Decorator: '#707a7c',
        Name.Tag: '#8B008B bold',
        Name.Attribute: '#658b00',
        Name.Builtin: '#658b00',

        Generic.Heading: 'bold #000080',
        Generic.Subheading: 'bold #800080',
        Generic.Deleted: '#aa0000',
        Generic.Inserted: '#00aa00',
        Generic.Error: '#aa0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: '#555555',
        Generic.Output: '#888888',
        Generic.Traceback: '#aa0000',

        Error: 'bg:#e3d2d2 #a61717'
    }
diff --git a/pygments/styles/rainbow_dash.py b/pygments/styles/rainbow_dash.py
new file mode 100644
index 0000000..e97d1c5
--- /dev/null
+++ b/pygments/styles/rainbow_dash.py
@@ -0,0 +1,88 @@
+"""
+ pygments.styles.rainbow_dash
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A bright and colorful syntax highlighting `theme`.
+
+ .. _theme: http://sanssecours.github.io/Rainbow-Dash.tmbundle
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Comment, Error, Generic, Name, Number, Operator, \
+ String, Text, Whitespace, Keyword
+
# Color palette for the Rainbow Dash theme: plain hex RGB strings that
# are interpolated into the style definitions of RainbowDashStyle below.
BLUE_LIGHT = '#0080ff'
BLUE = '#2c5dcd'
GREEN = '#00cc66'
GREEN_LIGHT = '#ccffcc'
GREEN_NEON = '#00cc00'
GREY = '#aaaaaa'
GREY_LIGHT = '#cbcbcb'
GREY_DARK = '#4d4d4d'
PURPLE = '#5918bb'
RED = '#cc0000'
RED_DARK = '#c5060b'
RED_LIGHT = '#ffcccc'
RED_BRIGHT = '#ff0000'
WHITE = '#ffffff'
TURQUOISE = '#318495'
ORANGE = '#ff8000'
+
+
class RainbowDashStyle(Style):
    """
    A bright and colorful syntax highlighting theme.

    All colors come from the module-level palette constants; style
    strings are assembled with f-strings.
    """

    background_color = WHITE

    styles = {
        Comment: f'italic {BLUE_LIGHT}',
        Comment.Preproc: 'noitalic',
        Comment.Special: 'bold',

        Error: f'bg:{RED} {WHITE}',

        Generic.Deleted: f'border:{RED_DARK} bg:{RED_LIGHT}',
        Generic.Emph: 'italic',
        Generic.Error: RED_BRIGHT,
        Generic.Heading: f'bold {BLUE}',
        Generic.Inserted: f'border:{GREEN_NEON} bg:{GREEN_LIGHT}',
        Generic.Output: GREY,
        Generic.Prompt: f'bold {BLUE}',
        Generic.Strong: 'bold',
        Generic.Subheading: f'bold {BLUE}',
        Generic.Traceback: RED_DARK,

        Keyword: f'bold {BLUE}',
        Keyword.Pseudo: 'nobold',
        Keyword.Type: PURPLE,

        Name.Attribute: f'italic {BLUE}',
        Name.Builtin: f'bold {PURPLE}',
        Name.Class: 'underline',
        Name.Constant: TURQUOISE,
        Name.Decorator: f'bold {ORANGE}',
        Name.Entity: f'bold {PURPLE}',
        Name.Exception: f'bold {PURPLE}',
        Name.Function: f'bold {ORANGE}',
        Name.Tag: f'bold {BLUE}',

        Number: f'bold {PURPLE}',

        Operator: BLUE,
        Operator.Word: 'bold',

        String: GREEN,
        String.Doc: 'italic',
        String.Escape: f'bold {RED_DARK}',
        String.Other: TURQUOISE,
        String.Symbol: f'bold {RED_DARK}',

        Text: GREY_DARK,

        Whitespace: GREY_LIGHT
    }
diff --git a/pygments/styles/rrt.py b/pygments/styles/rrt.py
new file mode 100644
index 0000000..ce248f6
--- /dev/null
+++ b/pygments/styles/rrt.py
@@ -0,0 +1,33 @@
+"""
+ pygments.styles.rrt
+ ~~~~~~~~~~~~~~~~~~~
+
+ pygments "rrt" theme, based on Zap and Emacs defaults.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token, Comment, Name, Keyword, String
+
+
class RrtStyle(Style):
    """
    Minimalistic "rrt" theme, based on Zap and Emacs defaults.
    """

    # Black background; selection/highlight is pure blue.
    background_color = '#000000'
    highlight_color = '#0000ff'

    styles = {
        Token: '#dddddd',  # default: light grey text on black
        Comment: '#00ff00',
        Name.Function: '#ffff00',
        Name.Variable: '#eedd82',
        Name.Constant: '#7fffd4',
        Keyword: '#ff0000',
        Comment.Preproc: '#e5e5e5',
        String: '#87ceeb',
        Keyword.Type: '#ee82ee',
    }
diff --git a/pygments/styles/sas.py b/pygments/styles/sas.py
new file mode 100644
index 0000000..0748933
--- /dev/null
+++ b/pygments/styles/sas.py
@@ -0,0 +1,41 @@
+"""
+ pygments.styles.sas
+ ~~~~~~~~~~~~~~~~~~~
+
+ Style inspired by SAS' enhanced program editor. Note This is not
+ meant to be a complete style. It's merely meant to mimic SAS'
+ program editor syntax highlighting.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Other, Whitespace, Generic
+
+
class SasStyle(Style):
    """
    Style inspired by SAS' enhanced program editor. Note: this is not
    meant to be a complete style. It's merely meant to mimic SAS'
    program editor syntax highlighting.
    """

    styles = {
        Whitespace: '#bbbbbb',
        Comment: 'italic #008800',
        String: '#800080',
        Number: 'bold #2c8553',
        # Non-SAS text (e.g. embedded code) gets a pale yellow background.
        Other: 'bg:#ffffe0',
        Keyword: '#2c2cff',
        Keyword.Reserved: 'bold #353580',
        Keyword.Constant: 'bold',
        Name.Builtin: '#2c2cff',
        Name.Function: 'bold italic',
        Name.Variable: 'bold #2c2cff',
        Generic: '#2c2cff',
        Generic.Emph: '#008800',
        Generic.Error: '#d30202',
        Error: 'bg:#e3d2d2 #a61717'
    }
diff --git a/pygments/styles/solarized.py b/pygments/styles/solarized.py
new file mode 100644
index 0000000..fc38235
--- /dev/null
+++ b/pygments/styles/solarized.py
@@ -0,0 +1,136 @@
+"""
+ pygments.styles.solarized
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Solarized by Camil Staps
+
+ A Pygments style for the Solarized themes (licensed under MIT).
+ See: https://github.com/altercation/solarized
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Comment, Error, Generic, Keyword, Name, Number, \
+ Operator, String, Token
+
+
def make_style(colors):
    """
    Build the Solarized ``styles`` mapping from a palette dict.

    ``colors`` maps palette names (``'base0'``, ``'blue'``, ...) to hex
    color strings; the same token layout serves both the dark and the
    light palette.
    """
    # Hoist palette entries that are used more than once into locals.
    base0 = colors['base0']
    base01 = colors['base01']
    blue = colors['blue']
    cyan = colors['cyan']
    green = colors['green']
    orange = colors['orange']
    red = colors['red']

    return {
        Token: base0,

        Comment: 'italic ' + base01,
        Comment.Hashbang: base01,
        Comment.Multiline: base01,
        Comment.Preproc: 'noitalic ' + colors['magenta'],
        Comment.PreprocFile: 'noitalic ' + base01,

        Keyword: green,
        Keyword.Constant: cyan,
        Keyword.Declaration: cyan,
        Keyword.Namespace: orange,
        Keyword.Type: colors['yellow'],

        Operator: base01,
        Operator.Word: green,

        Name.Builtin: blue,
        Name.Builtin.Pseudo: blue,
        Name.Class: blue,
        Name.Constant: blue,
        Name.Decorator: blue,
        Name.Entity: blue,
        Name.Exception: blue,
        Name.Function: blue,
        Name.Function.Magic: blue,
        Name.Label: blue,
        Name.Namespace: blue,
        Name.Tag: blue,
        Name.Variable: blue,
        Name.Variable.Global: blue,
        Name.Variable.Magic: blue,

        String: cyan,
        String.Doc: base01,
        String.Regex: orange,

        Number: cyan,

        Generic: base0,
        Generic.Deleted: red,
        Generic.Emph: 'italic',
        Generic.Error: red,
        Generic.Heading: 'bold',
        Generic.Subheading: 'underline',
        Generic.Inserted: green,
        Generic.Output: base0,
        Generic.Prompt: 'bold ' + blue,
        Generic.Strong: 'bold',
        Generic.Traceback: blue,

        Error: 'bg:' + red,
    }
+
+
# Canonical Solarized dark palette (base03 darkest ... base3 lightest).
DARK_COLORS = {
    'base03': '#002b36',
    'base02': '#073642',
    'base01': '#586e75',
    'base00': '#657b83',
    'base0': '#839496',
    'base1': '#93a1a1',
    'base2': '#eee8d5',
    'base3': '#fdf6e3',
    'yellow': '#b58900',
    'orange': '#cb4b16',
    'red': '#dc322f',
    'magenta': '#d33682',
    'violet': '#6c71c4',
    'blue': '#268bd2',
    'cyan': '#2aa198',
    'green': '#859900',
}
+
# Solarized light palette: same accent colors, with the base tone
# names inverted relative to DARK_COLORS (base03 <-> base3, etc.).
LIGHT_COLORS = {
    'base3': '#002b36',
    'base2': '#073642',
    'base1': '#586e75',
    'base0': '#657b83',
    'base00': '#839496',
    'base01': '#93a1a1',
    'base02': '#eee8d5',
    'base03': '#fdf6e3',
    'yellow': '#b58900',
    'orange': '#cb4b16',
    'red': '#dc322f',
    'magenta': '#d33682',
    'violet': '#6c71c4',
    'blue': '#268bd2',
    'cyan': '#2aa198',
    'green': '#859900',
}
+
+
class SolarizedDarkStyle(Style):
    """
    The solarized style, dark.
    """

    # Token layout is shared with the light variant; only the palette
    # passed to make_style differs.
    styles = make_style(DARK_COLORS)
    background_color = DARK_COLORS['base03']
    highlight_color = DARK_COLORS['base02']
    line_number_color = DARK_COLORS['base01']
    line_number_background_color = DARK_COLORS['base02']
+
+
class SolarizedLightStyle(SolarizedDarkStyle):
    """
    The solarized style, light.
    """

    # Inherits from the dark variant and overrides every palette-derived
    # attribute with the light palette.
    styles = make_style(LIGHT_COLORS)
    background_color = LIGHT_COLORS['base03']
    highlight_color = LIGHT_COLORS['base02']
    line_number_color = LIGHT_COLORS['base01']
    line_number_background_color = LIGHT_COLORS['base02']
diff --git a/pygments/styles/staroffice.py b/pygments/styles/staroffice.py
new file mode 100644
index 0000000..6589c1b
--- /dev/null
+++ b/pygments/styles/staroffice.py
@@ -0,0 +1,26 @@
+"""
+ pygments.styles.staroffice
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to StarOffice style, also in OpenOffice and LibreOffice.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Comment, Error, Literal, Name, Token
+
+
class StarofficeStyle(Style):
    """
    Style similar to StarOffice style, also in OpenOffice and LibreOffice.
    """

    # Deliberately tiny style: only five broad token categories.
    styles = {
        Token: '#000080',    # Blue
        Comment: '#696969',  # DimGray
        Error: '#800000',    # Maroon
        Literal: '#EE0000',  # Red
        Name: '#008000',     # Green
    }
diff --git a/pygments/styles/stata_dark.py b/pygments/styles/stata_dark.py
new file mode 100644
index 0000000..29ae051
--- /dev/null
+++ b/pygments/styles/stata_dark.py
@@ -0,0 +1,38 @@
+"""
+ pygments.styles.stata_dark
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Dark style inspired by Stata's do-file editor. Note this is not
+ meant to be a complete style, just for Stata's file formats.
+
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token, Keyword, Name, Comment, String, Error, \
+ Number, Operator, Whitespace, Generic
+
+
class StataDarkStyle(Style):
    """
    Dark style inspired by Stata's do-file editor. Note this is not
    meant to be a complete style, just for Stata's file formats.
    """

    background_color = "#232629"
    highlight_color = "#49483e"

    styles = {
        Token: '#cccccc',  # default: light grey text
        Whitespace: '#bbbbbb',
        Error: 'bg:#e3d2d2 #a61717',
        String: '#51cc99',
        Number: '#4FB8CC',
        # Empty string: no extra styling, inherit the Token default.
        Operator: '',
        Name.Function: '#6a6aff',
        Name.Other: '#e2828e',
        Keyword: 'bold #7686bb',
        Keyword.Constant: '',
        Comment: 'italic #777777',
        Name.Variable: 'bold #7AB4DB',
        Name.Variable.Global: 'bold #BE646C',
        Generic.Prompt: '#ffffff',
    }
diff --git a/pygments/styles/stata_light.py b/pygments/styles/stata_light.py
new file mode 100644
index 0000000..9e9dae6
--- /dev/null
+++ b/pygments/styles/stata_light.py
@@ -0,0 +1,37 @@
+"""
+ pygments.styles.stata_light
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Light Style inspired by Stata's do-file editor. Note this is not
+ meant to be a complete style, just for Stata's file formats.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Whitespace, Text
+
+
class StataLightStyle(Style):
    """
    Light mode style inspired by Stata's do-file editor. This is not
    meant to be a complete style, just for use with Stata.
    """

    styles = {
        Text: '#111111',
        Whitespace: '#bbbbbb',
        Error: 'bg:#e3d2d2 #a61717',
        String: '#7a2424',
        Number: '#2c2cff',
        # Empty string: no extra styling, inherit the default.
        Operator: '',
        Name.Function: '#2c2cff',
        Name.Other: '#be646c',
        Keyword: 'bold #353580',
        Keyword.Constant: '',
        Comment: 'italic #008800',
        Name.Variable: 'bold #35baba',
        Name.Variable.Global: 'bold #b5565e',
    }
diff --git a/pygments/styles/tango.py b/pygments/styles/tango.py
new file mode 100644
index 0000000..694e658
--- /dev/null
+++ b/pygments/styles/tango.py
@@ -0,0 +1,139 @@
+"""
+ pygments.styles.tango
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ The Crunchy default Style inspired from the color palette from
+ the Tango Icon Theme Guidelines.
+
+ http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines
+
+ Butter: #fce94f #edd400 #c4a000
+ Orange: #fcaf3e #f57900 #ce5c00
+ Chocolate: #e9b96e #c17d11 #8f5902
+ Chameleon: #8ae234 #73d216 #4e9a06
+ Sky Blue: #729fcf #3465a4 #204a87
+ Plum: #ad7fa8 #75507b #5c35cc
+ Scarlet Red:#ef2929 #cc0000 #a40000
+ Aluminium: #eeeeec #d3d7cf #babdb6
+ #888a85 #555753 #2e3436
+
+ Not all of the above colors are used; other colors added:
+ very light grey: #f8f8f8 (for background)
+
+ This style can be used as a template as it includes all the known
+ Token types, unlike most (if not all) of the styles included in the
+ Pygments distribution.
+
+ However, since Crunchy is intended to be used by beginners, we have strived
+ to create a style that gloss over subtle distinctions between different
+ categories.
+
+ Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
+ have been chosen to have the same style. Similarly, keywords (Keyword.*),
+ and Operator.Word (and, or, in) have been assigned the same style.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+
+
class TangoStyle(Style):
    """
    The Crunchy default Style inspired from the color palette from
    the Tango Icon Theme Guidelines.
    """

    # work in progress...

    # Very light grey, not part of the Tango palette (see module docstring).
    background_color = "#f8f8f8"

    styles = {
        # No corresponding class for the following:
        #Text: "", # class: ''
        Whitespace: "#f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'

        Comment: "italic #8f5902", # class: 'c'
        Comment.Multiline: "italic #8f5902", # class: 'cm'
        Comment.Preproc: "italic #8f5902", # class: 'cp'
        Comment.Single: "italic #8f5902", # class: 'c1'
        Comment.Special: "italic #8f5902", # class: 'cs'

        Keyword: "bold #204a87", # class: 'k'
        Keyword.Constant: "bold #204a87", # class: 'kc'
        Keyword.Declaration: "bold #204a87", # class: 'kd'
        Keyword.Namespace: "bold #204a87", # class: 'kn'
        Keyword.Pseudo: "bold #204a87", # class: 'kp'
        Keyword.Reserved: "bold #204a87", # class: 'kr'
        Keyword.Type: "bold #204a87", # class: 'kt'

        Operator: "bold #ce5c00", # class: 'o'
        Operator.Word: "bold #204a87", # class: 'ow' - like keywords

        Punctuation: "bold #000000", # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#204a87", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised

        # since the tango light blue does not show up well in text, we choose
        # a pure blue instead.
        Number: "bold #0000cf", # class: 'm'
        Number.Float: "bold #0000cf", # class: 'mf'
        Number.Hex: "bold #0000cf", # class: 'mh'
        Number.Integer: "bold #0000cf", # class: 'mi'
        Number.Integer.Long: "bold #0000cf", # class: 'il'
        Number.Oct: "bold #0000cf", # class: 'mo'

        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'

        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'

        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "italic #000000", # class: 'go'
        Generic.Prompt: "#8f5902", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }
diff --git a/pygments/styles/trac.py b/pygments/styles/trac.py
new file mode 100644
index 0000000..3d5e20e
--- /dev/null
+++ b/pygments/styles/trac.py
@@ -0,0 +1,60 @@
+"""
+ pygments.styles.trac
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Port of the default trac highlighter design.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace
+
+
class TracStyle(Style):
    """
    Port of the default trac highlighter design.
    """

    styles = {
        Whitespace: '#bbbbbb',
        Comment: 'italic #999988',
        Comment.Preproc: 'bold noitalic #999999',
        Comment.Special: 'bold #999999',

        Operator: 'bold',

        String: '#bb8844',
        String.Regex: '#808000',

        Number: '#009999',

        Keyword: 'bold',
        Keyword.Type: '#445588',

        Name.Builtin: '#999999',
        Name.Function: 'bold #990000',
        Name.Class: 'bold #445588',
        Name.Exception: 'bold #990000',
        Name.Namespace: '#555555',
        Name.Variable: '#008080',
        Name.Constant: '#008080',
        Name.Tag: '#000080',
        Name.Attribute: '#008080',
        Name.Entity: '#800080',

        Generic.Heading: '#999999',
        Generic.Subheading: '#aaaaaa',
        # Diff-style backgrounds: red for removed, green for added lines.
        Generic.Deleted: 'bg:#ffdddd #000000',
        Generic.Inserted: 'bg:#ddffdd #000000',
        Generic.Error: '#aa0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: '#555555',
        Generic.Output: '#888888',
        Generic.Traceback: '#aa0000',

        Error: 'bg:#e3d2d2 #a61717'
    }
diff --git a/pygments/styles/vim.py b/pygments/styles/vim.py
new file mode 100644
index 0000000..ba87c53
--- /dev/null
+++ b/pygments/styles/vim.py
@@ -0,0 +1,61 @@
+"""
+ pygments.styles.vim
+ ~~~~~~~~~~~~~~~~~~~
+
+ A highlighting style for Pygments, inspired by vim.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace, Token
+
+
class VimStyle(Style):
    """
    Styles somewhat like vim 7.0
    """

    background_color = "#000000"
    highlight_color = "#222222"

    styles = {
        Token: "#cccccc",  # default: light grey on black
        Whitespace: "",
        Comment: "#000080",
        Comment.Preproc: "",
        Comment.Special: "bold #cd0000",

        Keyword: "#cdcd00",
        Keyword.Declaration: "#00cd00",
        Keyword.Namespace: "#cd00cd",
        Keyword.Pseudo: "",
        Keyword.Type: "#00cd00",

        Operator: "#3399cc",
        Operator.Word: "#cdcd00",

        Name: "",
        Name.Class: "#00cdcd",
        Name.Builtin: "#cd00cd",
        Name.Exception: "bold #666699",
        Name.Variable: "#00cdcd",

        String: "#cd0000",
        Number: "#cd00cd",

        Generic.Heading: "bold #000080",
        Generic.Subheading: "bold #800080",
        Generic.Deleted: "#cd0000",
        Generic.Inserted: "#00cd00",
        Generic.Error: "#FF0000",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold #000080",
        Generic.Output: "#888",  # 3-digit hex shorthand
        Generic.Traceback: "#04D",

        # Errors are marked with a red border instead of a color.
        Error: "border:#FF0000"
    }
diff --git a/pygments/styles/vs.py b/pygments/styles/vs.py
new file mode 100644
index 0000000..bfc470b
--- /dev/null
+++ b/pygments/styles/vs.py
@@ -0,0 +1,36 @@
+"""
+ pygments.styles.vs
+ ~~~~~~~~~~~~~~~~~~
+
+ Simple style with MS Visual Studio colors.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Operator, Generic
+
+
class VisualStudioStyle(Style):
    """
    Simple style with MS Visual Studio colors.
    """

    background_color = "#ffffff"

    styles = {
        Comment: "#008000",
        Comment.Preproc: "#0000ff",
        Keyword: "#0000ff",
        Operator.Word: "#0000ff",
        Keyword.Type: "#2b91af",
        Name.Class: "#2b91af",
        String: "#a31515",

        Generic.Heading: "bold",
        Generic.Subheading: "bold",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold",

        # Errors are marked with a red border instead of a color.
        Error: "border:#FF0000"
    }
diff --git a/pygments/styles/xcode.py b/pygments/styles/xcode.py
new file mode 100644
index 0000000..13b3d13
--- /dev/null
+++ b/pygments/styles/xcode.py
@@ -0,0 +1,48 @@
+"""
+ pygments.styles.xcode
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to the `Xcode` default theme.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Literal
+
+
class XcodeStyle(Style):
    """
    Style similar to the Xcode default colouring theme.
    """

    styles = {
        Comment: '#177500',
        Comment.Preproc: '#633820',

        String: '#C41A16',
        String.Char: '#2300CE',

        Operator: '#000000',

        Keyword: '#A90D91',

        Name: '#000000',
        Name.Attribute: '#836C28',
        Name.Class: '#3F6E75',
        Name.Function: '#000000',
        Name.Builtin: '#A90D91',
        # In Obj-C code this token is used to colour Cocoa types
        Name.Builtin.Pseudo: '#5B269A',
        Name.Variable: '#000000',
        Name.Tag: '#000000',
        Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method
        # signatures as labels
        Name.Label: '#000000',

        Literal: '#1C01CE',
        Number: '#1C01CE',
        Error: '#000000',
    }
diff --git a/pygments/styles/zenburn.py b/pygments/styles/zenburn.py
new file mode 100644
index 0000000..c266564
--- /dev/null
+++ b/pygments/styles/zenburn.py
@@ -0,0 +1,78 @@
+"""
+ pygments.styles.zenburn
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Low contrast color scheme Zenburn.
+
+ See: https://kippura.org/zenburnpage/
+ https://github.com/jnurmine/Zenburn
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Token, Name, Operator, Keyword, Generic, Comment, \
+ Number, String, Literal, Punctuation, Error
+
+
class ZenburnStyle(Style):
    """
    Low contrast Zenburn style.
    """

    background_color = '#3f3f3f'
    highlight_color = '#484848'
    # Gutter (line-number column) colors, including the variants used
    # for specially-marked lines.
    line_number_color = '#5d6262'
    line_number_background_color = '#353535'
    line_number_special_color = '#7a8080'
    line_number_special_background_color = '#353535'

    styles = {
        Token: '#dcdccc',  # default: warm light grey
        Error: '#e37170 bold',

        Keyword: '#efdcbc',
        Keyword.Type: '#dfdfbf bold',
        Keyword.Constant: '#dca3a3',
        Keyword.Declaration: '#f0dfaf',
        Keyword.Namespace: '#f0dfaf',

        Name: '#dcdccc',
        Name.Tag: '#e89393 bold',
        Name.Entity: '#cfbfaf',
        Name.Constant: '#dca3a3',
        Name.Class: '#efef8f',
        Name.Function: '#efef8f',
        Name.Builtin: '#efef8f',
        Name.Builtin.Pseudo: '#dcdccc',
        Name.Attribute: '#efef8f',
        Name.Exception: '#c3bf9f bold',

        Literal: '#9fafaf',

        String: '#cc9393',
        String.Doc: '#7f9f7f',
        String.Interpol: '#dca3a3 bold',

        Number: '#8cd0d3',
        Number.Float: '#c0bed1',

        Operator: '#f0efd0',

        Punctuation: '#f0efd0',

        Comment: '#7f9f7f italic',
        Comment.Preproc: '#dfaf8f bold',
        Comment.PreprocFile: '#cc9393',
        Comment.Special: '#dfdfdf bold',

        Generic: '#ecbcbc bold',
        Generic.Emph: '#ffffff bold',
        Generic.Output: '#5b605e bold',
        Generic.Heading: '#efefef bold',
        Generic.Deleted: '#c3bf9f bg:#313c36',
        Generic.Inserted: '#709080 bg:#313c36 bold',
        Generic.Traceback: '#80d4aa bg:#2f2f2f bold',
        Generic.Subheading: '#efefef bold',
    }
diff --git a/pygments/token.py b/pygments/token.py
new file mode 100644
index 0000000..e3e565a
--- /dev/null
+++ b/pygments/token.py
@@ -0,0 +1,213 @@
+"""
+ pygments.token
+ ~~~~~~~~~~~~~~
+
+ Basic token types and the standard tokens.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
class _TokenType(tuple):
    """
    A node in the token-type hierarchy.

    Each instance is a tuple of attribute names describing its path from
    the root ``Token`` (e.g. ``('Name', 'Builtin')``).  Sub-types are
    created lazily on first attribute access and cached, so every token
    type is a singleton.
    """

    parent = None

    def split(self):
        """Return the chain of types from the root down to ``self``."""
        chain = [self]
        current = self.parent
        while current is not None:
            chain.append(current)
            current = current.parent
        chain.reverse()
        return chain

    def __init__(self, *args):
        # tuple contents are fixed in __new__; only set up the
        # per-instance registry of directly created sub-types.
        self.subtypes = set()

    def __contains__(self, val):
        # A type "contains" itself and every type whose path extends it.
        if self is val:
            return True
        return type(val) is self.__class__ and val[:len(self)] == self

    def __getattr__(self, val):
        # Only capitalized attribute names denote sub-types; everything
        # else goes through normal attribute lookup.
        if not val or not val[0].isupper():
            return tuple.__getattribute__(self, val)
        child = _TokenType(self + (val,))
        setattr(self, val, child)  # cache: next access skips __getattr__
        self.subtypes.add(child)
        child.parent = self
        return child

    def __repr__(self):
        if self:
            return 'Token.' + '.'.join(self)
        return 'Token'

    def __copy__(self):
        # These instances are supposed to be singletons
        return self

    def __deepcopy__(self, memo):
        # These instances are supposed to be singletons
        return self


Token = _TokenType()
+
+# Special token types
+Text = Token.Text
+Whitespace = Text.Whitespace
+Escape = Token.Escape
+Error = Token.Error
+# Text that doesn't belong to this lexer (e.g. HTML in PHP)
+Other = Token.Other
+
+# Common token types for source code
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
+Punctuation = Token.Punctuation
+Operator = Token.Operator
+Comment = Token.Comment
+
+# Generic types for non-source code
+Generic = Token.Generic
+
+# String and some others are not direct children of Token.
+# alias them:
+Token.Token = Token
+Token.String = String
+Token.Number = Number
+
+
def is_token_subtype(ttype, other):
    """
    Return True if ``ttype`` is a subtype of ``other``.

    Exists for backwards compatibility; use ``ttype in other`` now.
    """
    return ttype in other
+
+
def string_to_tokentype(s):
    """
    Convert a string into a token type::

        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token

    Tokens that are already tokens are returned unchanged:

        >>> string_to_tokentype(String)
        Token.Literal.String
    """
    # DOC FIX: the doctest examples called a nonexistent ``string_to_token``;
    # they now use this function's real name so they run correctly.
    if isinstance(s, _TokenType):
        return s
    if not s:
        return Token
    node = Token
    for part in s.split('.'):
        node = getattr(node, part)
    return node
+
+
+# Map standard token types to short names, used in CSS class naming.
+# If you add a new item, please be sure to run this file to perform
+# a consistency check for duplicate values.
# An empty short name means the token gets no CSS class at all.
STANDARD_TYPES = {
    Token: '',

    Text: '',
    Whitespace: 'w',
    Escape: 'esc',
    Error: 'err',
    Other: 'x',

    Keyword: 'k',
    Keyword.Constant: 'kc',
    Keyword.Declaration: 'kd',
    Keyword.Namespace: 'kn',
    Keyword.Pseudo: 'kp',
    Keyword.Reserved: 'kr',
    Keyword.Type: 'kt',

    Name: 'n',
    Name.Attribute: 'na',
    Name.Builtin: 'nb',
    Name.Builtin.Pseudo: 'bp',
    Name.Class: 'nc',
    Name.Constant: 'no',
    Name.Decorator: 'nd',
    Name.Entity: 'ni',
    Name.Exception: 'ne',
    Name.Function: 'nf',
    Name.Function.Magic: 'fm',
    Name.Property: 'py',
    Name.Label: 'nl',
    Name.Namespace: 'nn',
    Name.Other: 'nx',
    Name.Tag: 'nt',
    Name.Variable: 'nv',
    Name.Variable.Class: 'vc',
    Name.Variable.Global: 'vg',
    Name.Variable.Instance: 'vi',
    Name.Variable.Magic: 'vm',

    Literal: 'l',
    Literal.Date: 'ld',

    String: 's',
    String.Affix: 'sa',
    String.Backtick: 'sb',
    String.Char: 'sc',
    String.Delimiter: 'dl',
    String.Doc: 'sd',
    String.Double: 's2',
    String.Escape: 'se',
    String.Heredoc: 'sh',
    String.Interpol: 'si',
    String.Other: 'sx',
    String.Regex: 'sr',
    String.Single: 's1',
    String.Symbol: 'ss',

    Number: 'm',
    Number.Bin: 'mb',
    Number.Float: 'mf',
    Number.Hex: 'mh',
    Number.Integer: 'mi',
    Number.Integer.Long: 'il',
    Number.Oct: 'mo',

    Operator: 'o',
    Operator.Word: 'ow',

    Punctuation: 'p',
    Punctuation.Marker: 'pm',

    Comment: 'c',
    Comment.Hashbang: 'ch',
    Comment.Multiline: 'cm',
    Comment.Preproc: 'cp',
    Comment.PreprocFile: 'cpf',
    Comment.Single: 'c1',
    Comment.Special: 'cs',

    Generic: 'g',
    Generic.Deleted: 'gd',
    Generic.Emph: 'ge',
    Generic.Error: 'gr',
    Generic.Heading: 'gh',
    Generic.Inserted: 'gi',
    Generic.Output: 'go',
    Generic.Prompt: 'gp',
    Generic.Strong: 'gs',
    Generic.Subheading: 'gu',
    Generic.Traceback: 'gt',
}
diff --git a/pygments/unistring.py b/pygments/unistring.py
new file mode 100644
index 0000000..2e3c808
--- /dev/null
+++ b/pygments/unistring.py
@@ -0,0 +1,153 @@
+"""
+ pygments.unistring
+ ~~~~~~~~~~~~~~~~~~
+
+ Strings of all Unicode characters of a certain category.
+ Used for matching in Unicode-aware languages. Run to regenerate.
+
+ Inspired by chartypes_create.py from the MoinMoin project.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+Cc = '\x00-\x1f\x7f-\x9f'
+
+Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
+
+Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17
fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U00
01037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900
-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee7
3\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
+
+Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
+
+Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
+
+Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27
\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a
7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
+
+Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
+
+Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1
878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U0001086
0-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0
001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
+
+Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
+
+Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e
24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d
3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
+
+Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\
U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
+
+Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
+
+Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31
-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001
e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
+
+Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
+
+Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
+
+No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
+
+Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
+
+Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
+
+Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
+
+Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
+
+Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
+
+Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\
U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
+
+Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
+
+Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
+
+Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
+
+Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
+
+So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f26
5\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
+
+Zl = '\u2028'
+
+Zp = '\u2029'
+
+Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
+
+xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u
0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6
\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109
b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59
\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\
U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
+
+xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\
u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70
-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U
00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001
ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
+
# Names of all Unicode general categories whose character-class strings are
# defined above.  The derived xid_start/xid_continue properties are
# deliberately not listed here (the regeneration code below removes them).
cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']

# Generated from unidata 11.0.0
+
def combine(*args):
    """Return the concatenation of the category strings named in *args*.

    Each name must be one of the module-level category constants
    (e.g. ``'Lu'``, ``'Nd'``).
    """
    return ''.join(map(globals().__getitem__, args))
+
+
def allexcept(*args):
    """Concatenate every category string in ``cats`` except those in *args*.

    Raises ValueError if a name in *args* is not a known category.
    """
    remaining = list(cats)
    for excluded in args:
        remaining.remove(excluded)
    return ''.join(map(globals().__getitem__, remaining))
+
+
+def _handle_runs(char_list): # pragma: no cover
+ buf = []
+ for c in char_list:
+ if len(c) == 1:
+ if buf and buf[-1][1] == chr(ord(c)-1):
+ buf[-1] = (buf[-1][0], c)
+ else:
+ buf.append((c, c))
+ else:
+ buf.append((c, c))
+ for a, b in buf:
+ if a == b:
+ yield a
+ else:
+ yield '%s-%s' % (a, b)
+
+
# Regenerate this module in place: running this file rewrites all category
# constants above from the unicodedata database of the running interpreter.
if __name__ == '__main__': # pragma: no cover
    import unicodedata

    # xid_start/xid_continue are derived identifier properties, not
    # unicodedata.category() results, so they are seeded explicitly.
    categories = {'xid_start': [], 'xid_continue': []}

    # Read the current file so everything before the first category
    # assignment ('Cc =') and everything from 'def combine(' onward can be
    # written back verbatim around the regenerated constants.
    with open(__file__) as fp:
        content = fp.read()

    header = content[:content.find('Cc =')]
    footer = content[content.find("def combine("):]

    # Bucket every Unicode code point by its general category.
    for code in range(0x110000):
        c = chr(code)
        cat = unicodedata.category(c)
        if ord(c) == 0xdc00:
            # Hack to avoid combining this combining with the preceding high
            # surrogate, 0xdbff, when doing a repr.
            c = '\\' + c
        elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
            # Escape regex metachars.
            c = '\\' + c
        categories.setdefault(cat, []).append(c)
        # XID_START and XID_CONTINUE are special categories used for matching
        # identifiers in Python 3.
        if c.isidentifier():
            categories['xid_start'].append(c)
        # A character may continue an identifier if appending it to a valid
        # identifier ('a') still yields a valid identifier.
        if ('a' + c).isidentifier():
            categories['xid_continue'].append(c)

    # Rewrite the file: preserved header, regenerated constants (collapsed
    # into compact a-b ranges by _handle_runs), then the preserved footer.
    with open(__file__, 'w') as fp:
        fp.write(header)

        for cat in sorted(categories):
            val = ''.join(_handle_runs(categories[cat]))
            # %a emits an ASCII-only repr, keeping this file 7-bit clean.
            fp.write('%s = %a\n\n' % (cat, val))

        # 'cats' lists only the real general categories; the derived xid_*
        # properties are removed before it is written out.
        cats = sorted(categories)
        cats.remove('xid_start')
        cats.remove('xid_continue')
        fp.write('cats = %r\n\n' % cats)

        fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))

        fp.write(footer)
diff --git a/pygments/util.py b/pygments/util.py
new file mode 100644
index 0000000..8032962
--- /dev/null
+++ b/pygments/util.py
@@ -0,0 +1,308 @@
+"""
+ pygments.util
+ ~~~~~~~~~~~~~
+
+ Utility functions.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from io import TextIOWrapper
+
+
# Used by shebang_matches() to split a shebang line into path components.
split_path_re = re.compile(r'[/\\ ]')
# Matches a <!DOCTYPE ...> declaration; group 1 captures the root element
# name and, when present, the PUBLIC identifier string.
doctype_lookup_re = re.compile(r'''
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*
     (?: \s+      # optional in HTML5
      [a-zA-Z_][a-zA-Z0-9]*\s+
      "[^"]*")?
      )
      [^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
# Matches an opening tag followed (anywhere later) by a closing tag.
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
                    re.IGNORECASE | re.DOTALL | re.MULTILINE)
# Matches an XML declaration such as <?xml version="1.0"?>.
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
+
+
# Note: subclasses ValueError, so ``except ValueError`` also catches it.
class ClassNotFound(ValueError):
    """Raised if one of the lookup functions didn't find a matching class."""
+
+
class OptionError(Exception):
    """Raised by the ``get_*_opt`` helpers below when an option value has
    the wrong type or is not among the allowed values."""
    pass
+
+
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """Fetch *optname* from the *options* dict and require it to be one of
    *allowed*.

    Falls back to *default* when the key is absent; lowercases the value
    first when *normcase* is true.  Raises OptionError for anything not in
    *allowed*.
    """
    value = options.get(optname, default)
    if normcase:
        value = value.lower()
    if value in allowed:
        return value
    raise OptionError('Value for option %s must be one of %s' %
                      (optname, ', '.join(map(str, allowed))))
+
+
def get_bool_opt(options, optname, default=None):
    """Interpret *optname* from *options* as a boolean.

    Accepts real bools, ints (via truthiness) and the usual string
    spellings ('1'/'0', 'yes'/'no', 'true'/'false', 'on'/'off',
    case-insensitively).  Raises OptionError for any other type or string.
    """
    value = options.get(optname, default)
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, str):
        raise OptionError('Invalid type %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                              value, optname))
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError('Invalid value %r for option %s; use '
                      '1/0, yes/no, true/false, on/off' % (
                          value, optname))
+
+
def get_int_opt(options, optname, default=None):
    """Interpret *optname* from *options* as an integer via ``int()``.

    Raises OptionError with a type- or value-specific message when the
    conversion fails.
    """
    value = options.get(optname, default)
    try:
        return int(value)
    except TypeError:
        raise OptionError('Invalid type %r for option %s; you '
                          'must give an integer value' % (
                              value, optname))
    except ValueError:
        raise OptionError('Invalid value %r for option %s; you '
                          'must give an integer value' % (
                              value, optname))
+
+
def get_list_opt(options, optname, default=None):
    """Interpret *optname* from *options* as a list.

    Strings are whitespace-split; lists and tuples are shallow-copied into
    a new list.  Anything else raises OptionError.
    """
    value = options.get(optname, default)
    if isinstance(value, str):
        return value.split()
    if isinstance(value, (list, tuple)):
        return list(value)
    raise OptionError('Invalid type %r for option %s; you '
                      'must give a list value' % (
                          value, optname))
+
+
def docstring_headline(obj):
    """Return the first paragraph of *obj*'s docstring as a single line."""
    doc = obj.__doc__
    if not doc:
        return ''
    pieces = []
    for line in doc.strip().splitlines():
        stripped = line.strip()
        if not stripped:
            # Blank line ends the headline paragraph.
            break
        pieces.append(" " + stripped)
    return ''.join(pieces).lstrip()
+
+
def make_analysator(f):
    """Return a static text analyser function that returns float values."""
    def text_analyse(text):
        try:
            result = f(text)
        except Exception:
            return 0.0
        if not result:
            return 0.0
        try:
            # Clamp the score into [0.0, 1.0].
            return min(1.0, max(0.0, float(result)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
+
+
def shebang_matches(text, regex):
    r"""Check whether *regex* matches the interpreter named on a shebang line.

    Only the first line of *text* is considered.  The last path component
    that is not an option (``-...``) is matched, case-insensitively, against
    *regex* anchored on both sides; common Windows executable suffixes
    (``.exe``, ``.cmd``, ``.bat``, ``.bin``) are also accepted after the
    interpreter name.

    >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
    True
    >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
    False
    """
    first_line, _, _ = text.partition('\n')
    first_line = first_line.lower()
    if not first_line.startswith('#!'):
        return False
    # Split on '/', '\' and spaces; drop empty pieces and '-options' so
    # e.g. 'perl -e' matches like plain 'perl'.
    candidates = [part
                  for part in re.split(r'[/\\ ]', first_line[2:].strip())
                  if part and not part.startswith('-')]
    if not candidates:
        return False
    interpreter = candidates[-1]
    matcher = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
    return matcher.search(interpreter) is not None
+
+
def doctype_matches(text, regex):
    """Check if the doctype matches a regular expression (if present).

    Note that this method only checks the first part of a DOCTYPE.
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    """
    found = doctype_lookup_re.search(text)
    if found is None:
        return False
    doctype = found.group(1).strip()
    return re.compile(regex, re.I).match(doctype) is not None
+
+
def html_doctype_matches(text):
    """Return True when *text* carries an HTML doctype declaration."""
    return doctype_matches(text, r'html')
+
+
# Memoises tag-scan results keyed by hash of the input text.
_looks_like_xml_cache = {}


def looks_like_xml(text):
    """Heuristically decide whether *text* looks like XML.

    True for an XML declaration, a doctype, or a matched open/close tag
    pair within the first 1000 characters.
    """
    if xml_decl_re.match(text):
        return True
    key = hash(text)
    if key in _looks_like_xml_cache:
        return _looks_like_xml_cache[key]
    if doctype_lookup_re.search(text) is not None:
        return True
    result = tag_re.search(text[:1000]) is not None
    _looks_like_xml_cache[key] = result
    return result
+
+
def surrogatepair(c):
    """Given a unicode character code with length greater than 16 bits,
    return the two 16 bit surrogate pair.
    """
    # From example D28 of:
    # http://www.unicode.org/book/ch03.pdf
    high = 0xd7c0 + (c >> 10)
    low = 0xdc00 + (c & 0x3ff)
    return (high, low)
+
+
def format_lines(var_name, seq, raw=False, indent_level=0):
    """Format *seq* as source text for a parenthesised tuple assignment.

    With *raw* true the items are assumed to be preformatted reprs and are
    emitted verbatim; otherwise each item is repr'd with single quotes
    forced.
    """
    outer = ' ' * (indent_level * 4)
    inner = ' ' * ((indent_level + 1) * 4)
    out = [outer + var_name + ' = (']
    for item in seq:
        if raw:
            out.append(inner + item + ',')
        else:
            # Appending a double quote forces repr() to single-quote the
            # string; the trailing escaped quote is then stripped again.
            quoted = repr(item + '"')
            out.append(inner + quoted[:-2] + quoted[-1] + ',')
    out.append(outer + ')')
    return '\n'.join(out)
+
+
def duplicates_removed(it, already_seen=()):
    """
    Returns a list with duplicates removed from the iterable `it`.

    Order is preserved.
    """
    result = []
    encountered = set()
    for item in it:
        if item not in encountered and item not in already_seen:
            result.append(item)
            encountered.add(item)
    return result
+
+
class Future:
    """Generic class to defer some work.

    Handled specially in RegexLexerMeta, to support regex string construction at
    first use.
    """
    def get(self):
        # Subclasses compute and return the deferred value here.
        raise NotImplementedError
+
+
def guess_decode(text):
    """Decode the byte string *text* with a guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.

    Returns a ``(decoded_text, encoding_name)`` tuple.
    """
    try:
        return text.decode('utf-8'), 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            # Bug fix: decode with the encoding that is reported.  The
            # original called bare .decode(), which retries UTF-8 -- an
            # encoding that has already failed -- while claiming the
            # preferred locale encoding was used.
            return text.decode(prefencoding), prefencoding
        except (UnicodeDecodeError, LookupError):
            return text.decode('latin1'), 'latin1'
+
+
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given.
    Then try UTF-8.  Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    encoding = getattr(term, 'encoding', None)
    if encoding:
        try:
            return text.decode(encoding), encoding
        except UnicodeDecodeError:
            # Terminal encoding did not fit; fall through to guessing.
            pass
    return guess_decode(text)
+
+
def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*."""
    encoding = getattr(term, 'encoding', None)
    if encoding:
        return encoding
    import locale
    return locale.getpreferredencoding()
+
+
class UnclosingTextIOWrapper(TextIOWrapper):
    """TextIOWrapper variant that never closes the underlying buffer."""

    def close(self):
        # Deliberately leave the wrapped buffer open; only flush our data.
        self.flush()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..ddbb43c
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools >= 40.9.0",
+ "wheel",
+]
+build-backend = "setuptools.build_meta:__legacy__"
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..05a0b12
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+pytest-cov
+pytest-randomly
+pytest>=7.0
+pyflakes
+pylint
+tox
+wcag-contrast-ratio
diff --git a/scripts/check_crlf.py b/scripts/check_crlf.py
new file mode 100644
index 0000000..c03b68d
--- /dev/null
+++ b/scripts/check_crlf.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+"""
+ Checker for line endings
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Make sure Python (.py) and Bash completion (.bashcomp) files do not
+ contain CR/LF newlines.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import os
+
if __name__ == '__main__':
    # Walk every directory given on the command line and fail fast on the
    # first .py or .bashcomp file that contains a CR/LF line ending.
    for directory in sys.argv[1:]:
        # Non-existent arguments are silently skipped (best-effort check).
        if not os.path.exists(directory):
            continue

        for root, dirs, files in os.walk(directory):
            for filename in files:
                if not filename.endswith('.py') and not filename.endswith('.bashcomp'):
                    continue

                full_path = os.path.join(root, filename)
                # Binary read so the universal-newline layer cannot hide \r\n.
                with open(full_path, 'rb') as f:
                    if b'\r\n' in f.read():
                        print('CR/LF found in', full_path)
                        sys.exit(1)

    sys.exit(0)
diff --git a/scripts/check_repeated_token.py b/scripts/check_repeated_token.py
new file mode 100755
index 0000000..1636281
--- /dev/null
+++ b/scripts/check_repeated_token.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+ Checker for repeated tokens
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Helper script to find suspicious lexers which produce the same token
+ repeatedly, i.e. for example:
+
+ .. code::
+
+ 'd' Text
+ 'a' Text
+ 't' Text
+ 'a' Text
+ 'b' Text
+ 'a' Text
+ 's' Text
+ 'e' Text
+
+ This script has two test modes: Check for tokens repeating more often than
+ a given threshold, and exclude anything but single-character tokens.
+ Repeated single-character tokens are quite problematic as they result in
+ bloated output and are usually an indication that someone is missing
+ a + or * in the regex.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import argparse
+import sys
+
+from utility import unpack_output_file, process_output_files
+
+
def check_file(path, threshold, single_only):
    """Scan one token-output file for suspicious repetition.

    Returns False (after printing ``path:linenumber``) as soon as a token
    type repeats more than *threshold* times in a row; True otherwise.
    With *single_only*, tokens whose text is longer than one character are
    ignored and break any run in progress.
    """
    current_token = ''
    current_token_repeat_count = 1

    for value, token, linenumber in unpack_output_file(path):
        if single_only and len(value) > 1:
            # Bug fix: reset the *tracked* token.  The original assigned to
            # the loop variable ``token`` here, which is overwritten on the
            # next iteration, so a run could survive across a skipped
            # multi-character token.
            current_token = ''
            current_token_repeat_count = 1
            continue

        if token != current_token:
            current_token = token
            current_token_repeat_count = 1
        else:
            current_token_repeat_count += 1

        if current_token_repeat_count > threshold:
            print(f'{path}:{linenumber}')
            return False

    return True
+
+
def main(args):
    """Run check_file over every output file under args.TEST_ROOT.

    Returns 1 (for sys.exit) when any file failed the check, else 0.
    """
    def check_file_callback(path):
        # Bind the command-line tuning options into the per-file check.
        return check_file(path, args.threshold, args.single)

    if process_output_files(args.TEST_ROOT, check_file_callback) > 0:
        return 1
    return 0
+
+
if __name__ == '__main__':
    # Command-line entry point: parse options and exit with main()'s status.
    parser = argparse.ArgumentParser()
    parser.add_argument('TEST_ROOT',
                        help='Root directory containing the tests')
    parser.add_argument('-t', '--threshold', type=int, default=5,
                        # Bug fix: corrected "then" -> "than" in user-facing
                        # help text.
                        help='Warn if a token repeats itself more often than '
                             'this number.')
    parser.add_argument('-s', '--single', action='store_true', default=False,
                        help='Only look at tokens matching a single character')
    args = parser.parse_args()
    sys.exit(main(args))
diff --git a/scripts/check_sources.py b/scripts/check_sources.py
new file mode 100755
index 0000000..1feb1a3
--- /dev/null
+++ b/scripts/check_sources.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+"""
+ Checker for file headers
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Make sure each Python file has a correct file header
+ including copyright and license information.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import io
+import os
+import re
+import sys
+import getopt
+from os.path import join, splitext, abspath
+
+
# Registry mapping file suffix -> list of check functions for that suffix.
checkers = {}


def checker(*suffixes, **kwds):
    """Decorator registering a check function for the given file suffixes.

    ``only_pkg=True`` marks a check that is applied only to files inside
    the pygments package itself.
    """
    only_pkg = kwds.pop('only_pkg', False)

    def register(func):
        func.only_pkg = only_pkg
        for suffix in suffixes:
            checkers.setdefault(suffix, []).append(func)
        return func
    return register
+
+
# "Name <mail>" fragment used to build the multi-author copyright pattern.
name_mail_re = r'[\w ]+(<.*?>)?'
# Exact expected ":copyright:" line in a file header.
copyright_re = re.compile(r'^    :copyright: Copyright 2006-2022 by '
                          r'the Pygments team, see AUTHORS\.$')
# Continuation lines listing individual copyright holders.
copyright_2_re = re.compile(r'^                %s(, %s)*[,.]$' %
                            (name_mail_re, name_mail_re))
# Flags comparisons spelled as "== None/False/True".
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')

# Intentional misspellings kept out of the spell check.
misspellings = ["developement", "adress", "verificate",  # ALLOW-MISSPELLING
                "informations", "unlexer"]  # ALLOW-MISSPELLING
+
+
@checker('.py')
def check_syntax(fn, lines):
    """Verify that the file content compiles as Python."""
    if not lines:
        yield 0, "empty file"
        return
    # Drop a shebang line before compiling.
    source = lines[1:] if '#!/' in lines[0] else lines
    try:
        compile('\n'.join(source), fn, "exec")
    except SyntaxError as err:
        yield 0, "not compilable: %s" % err
+
+
@checker('.py')
def check_style_and_encoding(fn, lines):
    """Flag comparisons written as ``== None/True/False``."""
    for idx, line in enumerate(lines, 1):
        if is_const_re.search(line):
            yield idx, 'using == None/True/False'
+
+
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
    """Check the module docstring header: name line, underline, copyright
    and license fields.  Yields (lineno, message) pairs for violations."""
    # line number correction
    c = 1
    if lines[0:1] == ['#!/usr/bin/env python']:
        lines = lines[1:]
        c = 2

    llist = []
    docopen = False
    for lno, line in enumerate(lines):
        llist.append(line)
        if lno == 0:
            if line != '"""' and line != 'r"""':
                yield 2, f'missing docstring begin ("""), found {line!r}'
            else:
                docopen = True
        elif docopen:
            if line == '"""':
                # end of docstring
                if lno <= 3:
                    yield lno+c, "missing module name in docstring"
                break

            if line != "" and line[:4] != '    ' and docopen:
                yield lno+c, "missing correct docstring indentation"

            if lno == 1:
                # if not in package, don't check the module name
                modname = fn[:-3].replace('/', '.').replace('.__init__', '')
                # Strip leading package components until the docstring
                # heading matches; for/else fires when nothing matched.
                while modname:
                    if line.lower()[4:] == modname:
                        break
                    modname = '.'.join(modname.split('.')[1:])
                else:
                    yield 3, "wrong module name in docstring heading"
                modnamelen = len(line.strip())
            elif lno == 2:
                if line.strip() != modnamelen * "~":
                    yield 4, "wrong module name underline, should be ~~~...~"

    else:
        # for/else: reached only when the loop never hit the closing """.
        yield 0, "missing end and/or start of docstring..."

    # check for copyright and license fields
    license = llist[-2:-1]
    if license != ["    :license: BSD, see LICENSE for details."]:
        yield 0, "no correct license info"

    # Walk upwards over author continuation lines to find the first
    # ":copyright:" line.
    ci = -3
    copyright = llist[ci:ci+1]
    while copyright and copyright_2_re.match(copyright[0]):
        ci -= 1
        copyright = llist[ci:ci+1]
    if not copyright or not copyright_re.match(copyright[0]):
        yield 0, "no correct copyright info"
+
+
def main(argv):
    """Walk a source tree, run the registered checkers on matching files
    and report all findings; returns a shell exit status (0/1/2)."""
    try:
        gopts, args = getopt.getopt(argv[1:], "vi:")
    except getopt.GetoptError:
        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
        return 2
    opts = {}
    for opt, val in gopts:
        if opt == '-i':
            # Normalise ignore paths for comparison below.
            val = abspath(val)
        opts.setdefault(opt, []).append(val)

    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
        return 2

    verbose = '-v' in opts

    num = 0
    out = io.StringIO()

    for root, dirs, files in os.walk(path):
        # Prune VCS/tooling/example directories from the walk in place.
        for excl in ['.tox', '.git', 'examplefiles']:
            if excl in dirs:
                dirs.remove(excl)
        if '-i' in opts and abspath(root) in opts['-i']:
            del dirs[:]
            continue
        # XXX: awkward: for the Makefile call: don't check non-package
        # files for file headers
        in_pygments_pkg = root.startswith('./pygments')
        for fn in files:

            fn = join(root, fn)
            if fn[:2] == './':
                fn = fn[2:]

            if '-i' in opts and abspath(fn) in opts['-i']:
                continue

            # Only run checkers registered for this file extension.
            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue

            if verbose:
                print("Checking %s..." % fn)

            try:
                with open(fn, 'rb') as f:
                    lines = f.read().decode('utf-8').splitlines()
            except OSError as err:
                print("%s: cannot open: %s" % (fn, err))
                num += 1
                continue

            for checker in checkerlist:
                if not in_pygments_pkg and checker.only_pkg:
                    continue
                for lno, msg in checker(fn, lines):
                    # Findings are buffered so the summary can print first.
                    print('%s:%d: %s' % (fn, lno, msg), file=out)
                    num += 1
            if verbose:
                print()
    if num == 0:
        print("No errors found.")
    else:
        print(out.getvalue().rstrip('\n'))
        print("%d error%s found." % (num, num > 1 and "s" or ""))
    return int(num > 0)
+
+
if __name__ == '__main__':
    # Exit status 1 when any check reported a problem, 2 on bad usage.
    sys.exit(main(sys.argv))
diff --git a/scripts/check_whitespace_token.py b/scripts/check_whitespace_token.py
new file mode 100644
index 0000000..f5d0970
--- /dev/null
+++ b/scripts/check_whitespace_token.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+ Checker for whitespace tokens
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Helper script to find whitespace which is not of token type `Whitespace`
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import argparse
+import sys
+import re
+
+from utility import unpack_output_file, process_output_files
+
+
def check_file(path):
    """Scan one token-output file; return False (after printing the
    location) for whitespace lexed as anything other than a Whitespace,
    Literal or Comment token; True when the file is clean.
    """
    # Bug fix: raw string for the pattern -- '\s' in a plain literal is an
    # invalid escape sequence (DeprecationWarning today, a syntax error in
    # future Python versions).
    whitespace_re = re.compile(r'\s+')

    for value, token, linenumber in unpack_output_file(path):
        if whitespace_re.fullmatch(value):
            # We allow " " if it's inside a Literal.String for example
            if 'Literal' in token:
                continue

            # If whitespace is part of a comment, we accept that as well,
            # as comments may be similarly highlighted to literals
            if 'Comment' in token:
                continue

            if 'Whitespace' in token:
                continue

            print(f'{path}:{linenumber}')
            return False

    return True
+
+
def main(args):
    """Run check_file over every output file under args.TEST_ROOT; return
    1 (for sys.exit) when any file failed, else 0."""
    if process_output_files(args.TEST_ROOT, check_file) > 0:
        return 1
    return 0
+
+
if __name__ == '__main__':
    # Command-line entry point: parse the test root and exit with status.
    parser = argparse.ArgumentParser()
    parser.add_argument('TEST_ROOT',
                        help='Root directory containing the tests')
    args = parser.parse_args()
    sys.exit(main(args))
diff --git a/scripts/count_token_references.py b/scripts/count_token_references.py
new file mode 100755
index 0000000..8e798c2
--- /dev/null
+++ b/scripts/count_token_references.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+"""
+Count number of references to tokens in lexer source
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:program:`count_token_references` counts how many references to all existing
+tokens it can find by "grepping" the the source code of the lexers. This can
+be used to find typos in token names, as those tokens are only used by one lexer.
+
+:program:`count_token_references` supports the following options:
+
+.. program:: count_token_references
+
+.. option:: -v, --verbose
+ This gives output while the script is collecting information.
+
+.. option:: --minfiles <COUNT>
+ Only report about tokens that are referenced in at least this many lexer
+ source files (default 1).
+
+.. option:: --maxfiles <COUNT>
+ Only report about tokens that are referenced in at most this many lexer
+ source files (default 1).
+
+.. option:: --minlines <COUNT>
+ Only report about tokens that are referenced in at least this many lexer
+ source lines (default 1).
+
+.. option:: --maxlines <COUNT>
+ Only report about tokens that are referenced in at most this many lexer
+ source lines (default 10).
+
+.. option:: -s, --subtokens
+ When ``--subtoken`` is given each token is also counted for each of its
+ parent tokens. I.e. if we have 10 occurrences of the token
+ ``Token.Literal.Number.Integer`` and 10 occurrences of the token
+ ``Token.Literal.Number.Hex`` but none for ``Token.Literal.Number``, with
+ ``--subtoken`` ``Token.Literal.Number`` would be counted as having
+ 20 references.
+"""
+
+import sys, argparse, re, pathlib
+
+from pygments import token, lexers
+
+
def lookup_all_lexers():
    """
    Iterate through all lexers and fetch them.
    This should create all tokens that any of the lexers produce.
    """
    count = 0
    for (name, aliases, patterns, mimetypes) in lexers.get_all_lexers():
        # Each loop breaks after the first item, so the for/else fallbacks
        # run only when the preceding list is empty: prefer lookup by
        # alias, then by filename pattern, then by mimetype.
        for a in aliases:
            l = lexers.get_lexer_by_name(a)
            break
        else:
            for p in patterns:
                l = lexers.get_lexer_for_filename(p)
                break
            else:
                for m in mimetypes:
                    l = lexers.get_lexer_for_mimetype(m)
                    break
        count += 1
    return count
+
+
def fetch_lexer_sources():
    """
    Return the source code of all lexers as a dictionary, mapping filenames
    to a list of lines.
    """
    # Resolve pygments/lexers relative to this script's location.
    lexer_dir = (pathlib.Path(__file__).parent / "../pygments/lexers").resolve()
    sources = {}
    for path in lexer_dir.glob("*.py"):
        sources[path] = path.read_text(encoding='utf-8').splitlines(keepends=False)
    return sources
+
+
def sub_tokens(token):
    """
    Generator yielding *token* followed by all of its sub-tokens,
    depth-first.
    """
    yield token
    for child in token.subtypes:
        # Recurse into each subtype in iteration order.
        yield from sub_tokens(child)
+
+
class FileCount:
    """
    Stores information about line numbers in a file.

    This is used to store from which lines in a file a certain token is
    referenced.
    """
    def __init__(self, filename):
        self.filename = filename
        # Line numbers (ints) on which the token is referenced.
        self.lines = []

    def __str__(self):
        if len(self.lines) > 3:
            lines = ", ".join(f"{line:,}" for line in self.lines[:5])
            # Bug fix: report the number of recorded line numbers, not the
            # character length of the preview string built just above.
            lines = f"{lines}, ... ({len(self.lines):,} lines)"
        else:
            lines = ", ".join(f"{line:,}" for line in self.lines)
        return f"{self.filename.name}[{lines}]"

    def add(self, linenumber):
        """Record a reference on *linenumber*."""
        self.lines.append(linenumber)

    def count_lines(self):
        """Number of recorded referencing lines in this file."""
        return len(self.lines)
+
+
class TokenCount:
    """
    Stores information about a token and in which files it is referenced.
    """
    def __init__(self, token):
        self.token = token
        # Maps filename -> FileCount with the referencing line numbers.
        self.files = {}

    def add(self, filename, linenumber):
        # Lazily create the per-file counter on first reference.
        if filename not in self.files:
            self.files[filename] = FileCount(filename)
        self.files[filename].add(linenumber)

    def __str__(self):
        if len(self.files) > 3:
            files = []
            # NOTE(review): the preview shows up to 6 entries (break at
            # i >= 5) although the threshold above is 3 -- looks
            # inconsistent; confirm intended preview length.
            for (i, filecount) in enumerate(self.files.values()):
                files.append(str(filecount))
                if i >= 5:
                    break
            files = ", ".join(files) + f", ... ({len(self.files):,} files)"
        else:
            files = ", ".join(str(filecount) for filecount in self.files.values())
        return f"{self.count_files():,} files, {self.count_lines():,} locations: {files}"

    def count_files(self):
        # Number of distinct files referencing the token.
        return len(self.files)

    def count_lines(self):
        # Total referencing lines summed over all files.
        return sum(fc.count_lines() for fc in self.files.values())
+
+
def find_token_references(lexer_sources, args):
    """
    Find all references to all tokens in the source code of all lexers.

    Note that this can't be 100% reliable, as it searches the source code for
    certain patterns: It searches for the last two components of a token name,
    i.e. to find references to the token ``Token.Literal.Number.Integer.Long``
    it searches for the regular expression ``\\bInteger.Long\\b``. This
    won't work reliably for top level token like ``Token.String`` since this
    is often referred to as ``String``, but searching for ``\\bString\\b``
    yields too many false positives.
    """

    # Maps token to :class:`TokenCount` objects.
    token_references = {}

    # Search for each token in each lexer source file and record in which file
    # and in which line they are referenced
    for t in sub_tokens(token.Token):
        # Use the last two name components as the search needle.
        parts = list(t)[-2:]
        if len(parts) == 0:
            name = "Token"
        elif len(parts) == 1:
            name = f"Token.{parts[0]}"
        else:
            name = ".".join(parts)

        token_references[t] = tokencount = TokenCount(t)

        # The bare root "Token" is never grepped for (too many hits).
        if name != "Token":
            pattern = re.compile(f"\\b{name}\\b")

            for (filename, sourcelines) in lexer_sources.items():
                for (i, line) in enumerate(sourcelines, 1):
                    if pattern.search(line) is not None:
                        tokencount.add(filename, i)
                        if args.subtoken:
                            # Credit every ancestor token as well.  Parents
                            # are visited before children by sub_tokens, so
                            # their TokenCount entries already exist.
                            t2 = t
                            while t2 is not token.Token:
                                t2 = t2.parent
                                tokencount2 = token_references[t2]
                                tokencount2.add(filename, i)

    return token_references
+
+
def print_result(token_references, args):
    """Print every token whose file and line reference counts fall inside
    the [minfiles, maxfiles] and [minlines, maxlines] bounds, least
    referenced first."""
    def sort_key(item):
        locations = item[1]
        return (locations.count_files(), locations.count_lines())

    for (tok, locations) in sorted(token_references.items(), key=sort_key):
        nfiles = locations.count_files()
        nlines = locations.count_lines()
        if args.minfiles <= nfiles <= args.maxfiles:
            if args.minlines <= nlines <= args.maxlines:
                print(f"{tok}: {locations}")
+
+
def main(args=None):
    """Command-line driver: parse options, collect token references from
    the lexer sources and print the filtered report."""
    p = argparse.ArgumentParser(description="Count how often each token is used by the lexers")
    p.add_argument(
        "-v", "--verbose",
        dest="verbose", help="Give more output.",
        default=False, action="store_true"
    )
    p.add_argument(
        "--minfiles",
        dest="minfiles", metavar="COUNT", type=int,
        help="Report all tokens referenced by at least COUNT lexer source files (default %(default)s)",
        default=1
    )
    p.add_argument(
        "--maxfiles",
        dest="maxfiles", metavar="COUNT", type=int,
        help="Report all tokens referenced by at most COUNT lexer source files (default %(default)s)",
        default=1
    )
    p.add_argument(
        "--minlines",
        dest="minlines", metavar="COUNT", type=int,
        help="Report all tokens referenced by at least COUNT lexer source lines (default %(default)s)",
        default=1
    )
    p.add_argument(
        "--maxlines",
        dest="maxlines", metavar="COUNT", type=int,
        help="Report all tokens referenced by at most COUNT lexer source lines (default %(default)s)",
        default=10
    )
    p.add_argument(
        "-s", "--subtoken",
        dest="subtoken",
        help="Include count of references to subtokens in the count for each token (default %(default)s)",
        default=False, action="store_true"
    )

    args = p.parse_args(args)

    # Instantiating every lexer first ensures all token types exist before
    # the source grep below.
    if args.verbose:
        print("Looking up all lexers ... ", end="", flush=True)
    count = lookup_all_lexers()
    if args.verbose:
        print(f"found {count:,} lexers")

    if args.verbose:
        print("Fetching lexer source code ... ", end="", flush=True)
    lexer_sources = fetch_lexer_sources()
    if args.verbose:
        print(f"found {len(lexer_sources):,} lexer source files")

    if args.verbose:
        print("Finding token references ... ", end="", flush=True)
    token_references = find_token_references(lexer_sources, args)
    if args.verbose:
        print(f"found references to {len(token_references):,} tokens")

    if args.verbose:
        print()
        print("Result:")
    print_result(token_references, args)
diff --git a/scripts/debug_lexer.py b/scripts/debug_lexer.py
new file mode 100755
index 0000000..6323d9c
--- /dev/null
+++ b/scripts/debug_lexer.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+"""
+ Lexing error finder
+ ~~~~~~~~~~~~~~~~~~~
+
+ For the source files given on the command line, display
+ the text where Error tokens are being generated, along
+ with some context.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import sys
+import struct
+
+# always prefer Pygments from source if exists
+srcpath = os.path.join(os.path.dirname(__file__), '..')
+if os.path.isdir(os.path.join(srcpath, 'pygments')):
+ sys.path.insert(0, srcpath)
+
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
+ ProfilingRegexLexer, ProfilingRegexLexerMeta
+from pygments.lexers import get_lexer_by_name, find_lexer_class, \
+ find_lexer_class_for_filename, guess_lexer
+from pygments.token import Error, Text, _TokenType
+from pygments.cmdline import _parse_options
+
+
class DebuggingRegexLexer(ExtendedRegexLexer):
    """Make the state stack, position and current match instance attributes."""

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        tokendefs = self._tokens
        # Expose the lexer context on self so callers can inspect the
        # state stack and position after (or during) lexing.
        self.ctx = ctx = LexerContext(text, 0)
        ctx.stack = list(stack)
        statetokens = tokendefs[ctx.stack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                # Also expose the current match object for debugging.
                self.m = m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            if not isinstance(self, ExtendedRegexLexer):
                                yield from action(self, m)
                                ctx.pos = m.end()
                            else:
                                yield from action(self, m, ctx)
                                if not new_state:
                                    # altered the state stack?
                                    statetokens = tokendefs[ctx.stack[-1]]
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, 'wrong state def: %r' % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # for/else: no rule in the current state matched.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to 'root'
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    # Emit one Error token and advance a single character.
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
+
+
def decode_atheris(bstr):
    """Decode a byte string into a Unicode string using the algorithm
    of Google's Atheris fuzzer library, which aims to produce a wide
    range of possible Unicode inputs.

    Corresponds to ConsumeUnicodeImpl() with filter_surrogates=false in
    https://github.com/google/atheris/blob/master/fuzzed_data_provider.cc
    """
    if len(bstr) < 2:
        return ''
    # The first byte only selects if the rest is decoded as ascii, "utf-16" or "utf-32"
    spec, bstr = bstr[0], bstr[1:]
    if spec & 1:  # pure ASCII
        return ''.join(chr(ch & 0x7f) for ch in bstr)
    elif spec & 2:  # UTF-16
        # Truncate to an even length before decoding.
        bstr = bstr if len(bstr) % 2 == 0 else bstr[:-1]
        return bstr.decode('utf16')

    # else UTF-32
    def valid_codepoint(ch):
        # Mask into the 21-bit Unicode range and clear the bits that would
        # select planes beyond U+10FFFF.
        ch &= 0x1fffff
        if ch & 0x100000:
            ch &= ~0x0f0000
        return chr(ch)

    # Trailing bytes that do not fill a 4-byte word are padding ('x').
    chars = struct.unpack('%dI%dx' % divmod(len(bstr), 4), bstr)
    # Bug fix: pass the iterable to map().  The original called
    # ''.join(map(valid_codepoint), chars) -- map() with a single argument
    # and a stray second argument to join() -- a TypeError at runtime.
    return ''.join(map(valid_codepoint, chars))
+
+
def main(fn, lexer=None, options={}):
    """Lex one file and report Error tokens (with context) or all tokens.

    Behaviour is steered by the module-level option globals set from the
    command line (num, showall, ignerror, guess, decode_strategy, ...).
    Returns 1 when an error token stopped the run, else 0.
    """
    if fn == '-':
        text = sys.stdin.read()
    else:
        with open(fn, 'rb') as fp:
            text = fp.read()
        # Decode according to the strategy chosen via -u / -U (global).
        if decode_strategy == 'latin1':
            try:
                text = text.decode('utf8')
            except UnicodeError:
                print('Warning: non-UTF8 input, using latin1')
                text = text.decode('latin1')
        elif decode_strategy == 'utf8-ignore':
            try:
                text = text.decode('utf8')
            except UnicodeError:
                print('Warning: ignoring non-UTF8 bytes in input')
                text = text.decode('utf8', 'ignore')
        elif decode_strategy == 'atheris':
            text = decode_atheris(text)

    # Normalise trailing newlines to exactly one.
    text = text.strip('\n') + '\n'

    # Pick the lexer class: explicit -l name, -g content guess, or by
    # filename (test files may encode the lexer name before an underscore).
    if lexer is not None:
        lxcls = get_lexer_by_name(lexer).__class__
    elif guess:
        lxcls = guess_lexer(text).__class__
        print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
                                           lxcls.__name__))
    else:
        lxcls = find_lexer_class_for_filename(os.path.basename(fn))
        if lxcls is None:
            name, rest = fn.split('_', 1)
            lxcls = find_lexer_class(name)
            if lxcls is None:
                raise AssertionError('no lexer found for file %r' % fn)
        print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
                                           lxcls.__name__))
    debug_lexer = False
    # if profile:
    #     # does not work for e.g. ExtendedRegexLexers
    #     if lxcls.__bases__ == (RegexLexer,):
    #         # yes we can!  (change the metaclass)
    #         lxcls.__class__ = ProfilingRegexLexerMeta
    #         lxcls.__bases__ = (ProfilingRegexLexer,)
    #         lxcls._prof_sort_index = profsort
    # else:
    #     if lxcls.__bases__ == (RegexLexer,):
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True
    #     elif lxcls.__bases__ == (DebuggingRegexLexer,):
    #         # already debugged before
    #         debug_lexer = True
    #     else:
    #         # HACK: ExtendedRegexLexer subclasses will only partially work here.
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True

    lx = lxcls(**options)
    lno = 1
    tokens = []
    states = []

    def show_token(tok, state):
        # Print one (tokentype, value) pair, optionally with the state
        # stack when the debugging lexer is active.
        reprs = list(map(repr, tok))
        print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
        if debug_lexer:
            print(' ' + ' ' * (29-len(reprs[0])) + ' : '.join(state)
                  if state else '', end=' ')
        print()

    for type, val in lx.get_tokens(text):
        lno += val.count('\n')
        if type == Error and not ignerror:
            print('Error parsing', fn, 'on line', lno)
            if not showall:
                print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
                # Show only the last `num` tokens before the error.
                for i in range(max(len(tokens) - num, 0), len(tokens)):
                    if debug_lexer:
                        show_token(tokens[i], states[i])
                    else:
                        show_token(tokens[i], None)
            print('Error token:')
            vlen = len(repr(val))
            print(' ' + repr(val), end=' ')
            if debug_lexer and hasattr(lx, 'ctx'):
                print(' ' * (60-vlen) + ' : '.join(lx.ctx.stack), end=' ')
            print()
            print()
            return 1
        tokens.append((type, val))
        if debug_lexer:
            if hasattr(lx, 'ctx'):
                states.append(lx.ctx.stack[:])
            else:
                states.append(None)
        if showall:
            show_token((type, val), states[-1] if debug_lexer else None)
    return 0
+
+
def print_help():
    # Usage text kept as a single literal so it prints verbatim.
    # NOTE(review): column alignment of the option descriptions was
    # reconstructed; verify against the original output.
    print('''\
Pygments development helper to quickly debug lexers.

    scripts/debug_lexer.py [options] file ...

Give one or more filenames to lex them and display possible error tokens
and/or profiling info.  Files are assumed to be encoded in UTF-8.

Selecting lexer and options:

    -l NAME         use lexer named NAME (default is to guess from
                    the given filenames)
    -g              guess lexer from content
    -u              if input is non-utf8, use "ignore" handler instead
                    of using latin1 encoding
    -U              use Atheris fuzzer's method of converting
                    byte input to Unicode
    -O OPTIONSTR    use lexer options parsed from OPTIONSTR

Debugging lexing errors:

    -n N            show the last N tokens on error
    -a              always show all lexed tokens (default is only
                    to show them when an error occurs)
    -e              do not stop on error tokens

Profiling:

    -p              use the ProfilingRegexLexer to profile regexes
                    instead of the debugging lexer
    -s N            sort profiling output by column N (default is
                    column 4, the time per call)
''')
+
+
# Option globals, overwritten by the getopt loop below when run as a script.
num = 10                     # -n: tokens of context to show on error
showall = False              # -a: print every token, not just on error
ignerror = False             # -e: keep lexing past Error tokens
lexer = None                 # -l: explicit lexer name
options = {}                 # -O: lexer options parsed from a string
profile = False              # -p: profile regexes (currently disabled above)
profsort = 4                 # -s: profiling sort column
guess = False                # -g: guess lexer from content
decode_strategy = 'latin1'   # -u / -U change the input decoding strategy
+
if __name__ == '__main__':
    # Parse command-line flags into the module-level option globals, then
    # run main() once per input file, accumulating failures.
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'n:l:aepO:s:hguU')
    for opt, val in opts:
        if opt == '-n':
            num = int(val)
        elif opt == '-a':
            showall = True
        elif opt == '-e':
            ignerror = True
        elif opt == '-l':
            lexer = val
        elif opt == '-p':
            profile = True
        elif opt == '-s':
            profsort = int(val)
        elif opt == '-O':
            options = _parse_options([val])
        elif opt == '-g':
            guess = True
        elif opt == '-u':
            decode_strategy = 'utf8-ignore'
        elif opt == '-U':
            decode_strategy = 'atheris'
        elif opt == '-h':
            print_help()
            sys.exit(0)
    ret = 0
    if not args:
        print_help()
    for f in args:
        ret += main(f, lexer, options)
    # Exit status is 1 when any file produced an error.
    sys.exit(bool(ret))
diff --git a/scripts/detect_missing_analyse_text.py b/scripts/detect_missing_analyse_text.py
new file mode 100644
index 0000000..c377b0b
--- /dev/null
+++ b/scripts/detect_missing_analyse_text.py
@@ -0,0 +1,48 @@
+"""
+ detect_missing_analyse_text
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+
+from pygments.lexers import get_all_lexers, find_lexer_class
+from pygments.lexer import Lexer
+
+import argparse
+
+
+def main(args):
+ uses = {}
+
+ for name, aliases, filenames, mimetypes in get_all_lexers(plugins=False):
+ cls = find_lexer_class(name)
+ if not cls.aliases and not args.skip_no_aliases:
+ print(cls, "has no aliases")
+ for f in filenames:
+ uses.setdefault(f, []).append(cls)
+
+ ret = 0
+ for k, v in uses.items():
+ if len(v) > 1:
+ # print("Multiple for", k, v)
+ for i in v:
+ if i.analyse_text is None:
+ print(i, "has a None analyse_text")
+ ret |= 1
+ elif Lexer.analyse_text.__doc__ == i.analyse_text.__doc__:
+ print(i, "needs analyse_text, multiple lexers for", k)
+ ret |= 2
+ return ret
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--skip-no-aliases',
+ help='Skip checks for a lexer with no aliases',
+ action='store_true',
+ default=False)
+ args = parser.parse_args()
+ sys.exit(main(args))
diff --git a/scripts/gen_mapfiles.py b/scripts/gen_mapfiles.py
new file mode 100644
index 0000000..a5aed0c
--- /dev/null
+++ b/scripts/gen_mapfiles.py
@@ -0,0 +1,53 @@
+"""
+ scripts/gen_mapfiles.py
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Regenerate mapping files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from importlib import import_module
+from pathlib import Path
+import re
+import sys
+
+top_src_dir = Path(__file__).parent.parent
+pygments_package = top_src_dir / 'pygments'
+sys.path.insert(0, str(pygments_package.parent.resolve()))
+
+from pygments.util import docstring_headline
+
+def main():
+ for key in ['lexers', 'formatters']:
+ lines = []
+ for file in (pygments_package / key).glob('[!_]*.py'):
+ module_name = '.'.join(file.relative_to(pygments_package.parent).with_suffix('').parts)
+ print(module_name)
+ module = import_module(module_name)
+ for obj_name in module.__all__:
+ obj = getattr(module, obj_name)
+ desc = (module_name, obj.name, tuple(obj.aliases), tuple(obj.filenames))
+ if key == 'lexers':
+ desc += (tuple(obj.mimetypes),)
+ elif key == 'formatters':
+ desc += (docstring_headline(obj),)
+ else:
+ assert False
+ lines.append(f' {obj_name!r}: {desc!r},')
+ # Sort to make diffs minimal.
+ lines.sort()
+ new_dict = '\n'.join(lines)
+ content = f'''# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+
+{key.upper()} = {{
+{new_dict}
+}}
+'''
+ (pygments_package / key / '_mapping.py').write_text(content, encoding='utf8')
+ print(f'=== {len(lines)} {key} processed.')
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/get_css_properties.py b/scripts/get_css_properties.py
new file mode 100644
index 0000000..3afe98e
--- /dev/null
+++ b/scripts/get_css_properties.py
@@ -0,0 +1,33 @@
+"""
+ get_css_properties
+ ~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+from pygments.util import format_lines
+import json
+import urllib.request
+
+HEADER = '''\
+"""
+ pygments.lexers._css_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file is autogenerated by scripts/get_css_properties.py
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+'''
+
+if __name__ == "__main__":
+ data_request = urllib.request.urlopen('https://www.w3.org/Style/CSS/all-properties.en.json')
+ data = json.load(data_request)
+ names = set([p['property'] for p in data if p['property'] != '--*'])
+
+ with open('../pygments/lexers/_css_builtins.py', 'w') as builtin_file:
+ builtin_file.write(HEADER)
+ builtin_file.write(format_lines('_css_properties', sorted(names)))
diff --git a/scripts/get_vimkw.py b/scripts/get_vimkw.py
new file mode 100644
index 0000000..0b2d82e
--- /dev/null
+++ b/scripts/get_vimkw.py
@@ -0,0 +1,72 @@
+import re
+
+from pygments.util import format_lines
+
+r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
+ r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
+r_item = re.compile(r"(\w+)(?:\[(\w+)\])?")
+
+HEADER = '''\
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers._vim_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file is autogenerated by scripts/get_vimkw.py
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# Split up in multiple functions so it's importable by jython, which has a
+# per-method size limit.
+'''
+
+METHOD = '''\
+def _get%(key)s():
+%(body)s
+ return var
+%(key)s = _get%(key)s()
+'''
+
+def getkw(input, output):
+ out = file(output, 'w')
+
+ # Copy template from an existing file.
+ print(HEADER, file=out)
+
+ output_info = {'command': [], 'option': [], 'auto': []}
+ for line in file(input):
+ m = r_line.match(line)
+ if m:
+ # Decide which output gets mapped to d
+ if 'vimCommand' in m.group(1):
+ d = output_info['command']
+ elif 'AutoEvent' in m.group(1):
+ d = output_info['auto']
+ else:
+ d = output_info['option']
+
+ # Extract all the shortened versions
+ for i in r_item.finditer(m.group(2)):
+ d.append('(%r,%r)' %
+ (i.group(1), "%s%s" % (i.group(1), i.group(2) or '')))
+
+ output_info['option'].append("('nnoremap','nnoremap')")
+ output_info['option'].append("('inoremap','inoremap')")
+ output_info['option'].append("('vnoremap','vnoremap')")
+
+ for key, keywordlist in output_info.items():
+ keywordlist.sort()
+ body = format_lines('var', keywordlist, raw=True, indent_level=1)
+ print(METHOD % locals(), file=out)
+
+def is_keyword(w, keywords):
+ for i in range(len(w), 0, -1):
+ if w[:i] in keywords:
+ return keywords[w[:i]][:len(w)] == w
+ return False
+
+if __name__ == "__main__":
+ getkw("/usr/share/vim/vim74/syntax/vim.vim",
+ "pygments/lexers/_vim_builtins.py")
diff --git a/scripts/pylintrc b/scripts/pylintrc
new file mode 100644
index 0000000..b602eaa
--- /dev/null
+++ b/scripts/pylintrc
@@ -0,0 +1,301 @@
+# lint Python modules using external checkers.
+#
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats'data (number of classes, methods...)
+#
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=.svn
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Set the cache size for astng objects.
+cache-size=500
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflict with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option conflict
+# with the disable-checker option
+#disable-checker=
+
+# Enable all messages in the listed categories.
+#enable-msg-cat=
+
+# Disable all messages in the listed categories.
+#disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+disable-msg=C0323,W0142,C0301,C0103,C0111,E0213,C0302,C0203,W0703,R0201
+
+
+[REPORTS]
+
+# set the output format. Available formats are text, parseable, colorized and
+# html
+output-format=colorized
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note).You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (R0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (R0004).
+comment=no
+
+# Enable the report(s) with the given id(s).
+#enable-report=
+
+# Disable the report(s) with the given id(s).
+#disable-report=
+
+
+# checks for
+# * unused variables / imports
+# * undefined variables
+# * redefinition of variable from builtins or from an outer scope
+# * use of variable before assignment
+#
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+# try to find bugs in the code using type inference
+#
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# When zope mode is activated, consider the acquired-members option to ignore
+# access to some undefined attributes.
+zope=no
+
+# List of members which are usually get through zope's acquisition mechanism and
+# so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
+acquired-members=REQUEST,acl_users,aq_parent
+
+
+# checks for :
+# * doc strings
+# * modules / classes / functions / methods / arguments / variables name
+# * number of arguments, local variables, branches, returns and statements in
+# functions, methods
+# * required module attributes
+# * dangerous default values as arguments
+# * redefinition of function / method / class
+# * uses of the global statement
+#
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=apply,input
+
+
+# checks for sign of poor/misdesign:
+# * number of methods, attributes, local variables...
+# * size, complexity of functions, methods
+#
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=12
+
+# Maximum number of locals for function / method body
+max-locals=30
+
+# Maximum number of return / yield for function / method body
+max-returns=12
+
+# Maximum number of branch for function / method body
+max-branchs=30
+
+# Maximum number of statements in function / method body
+max-statements=60
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=20
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+# checks for
+# * external modules dependencies
+# * relative / wildcard imports
+# * cyclic imports
+# * uses of deprecated modules
+#
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report R0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report R0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report R0402 must
+# not be disabled)
+int-import-graph=
+
+
+# checks for :
+# * methods without self as first argument
+# * overridden methods signature
+# * access only to existent members via self
+# * attributes not defined in the __init__ method
+# * supported interfaces implementation
+# * unreachable code
+#
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+
+# checks for similarities and duplicated code. This computation may be
+# memory / CPU intensive, so you should disable it if you experience
+# problems.
+#
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=10
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+# checks for:
+# * warning notes in the code like FIXME, XXX
+# * PEP 263: source code with non ascii character but no encoding declaration
+#
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+# checks for :
+# * unauthorized constructions
+# * strict indentation
+# * line length
+# * use of <> instead of !=
+#
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=90
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
diff --git a/scripts/release-checklist b/scripts/release-checklist
new file mode 100644
index 0000000..087917f
--- /dev/null
+++ b/scripts/release-checklist
@@ -0,0 +1,24 @@
+Release checklist
+=================
+
+* Check ``git status``
+* ``make check``
+* LATER when configured properly: ``make pylint``
+* ``tox``
+* Update version in ``pygments/__init__.py``
+* Check setup.py metadata: long description, trove classifiers
+* Update release date/code name in ``CHANGES``
+* ``git commit``
+* Wait for the CI to finish
+* ``make clean``
+* ``python3 -m build``
+* Check the size of the generated packages. If they're significantly different from the last release, check if the repository is in a modified state and that ``make clean`` was run.
+* ``twine upload dist/Pygments-$NEWVER*``
+* Check PyPI release page for obvious errors (like different file sizes!)
+* ``git tag -a``
+* Add new ``CHANGES`` heading for next version
+* ``git commit``
+* ``git push``, ``git push --tags``
+* Add new release on https://github.com/pygments/pygments/releases
+* Add new milestone on https://github.com/pygments/pygments/milestones if needed
+* Write announcement and send to mailing list/python-announce
diff --git a/scripts/update_contrasts.py b/scripts/update_contrasts.py
new file mode 100755
index 0000000..156bc5c
--- /dev/null
+++ b/scripts/update_contrasts.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+"""
+ Updates tests/contrast/min_contrasts.json
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Whenever you have improved the minimum contrast of a style you should run
+    this script, so that the test_contrasts.py test prevents future degradations.
+"""
+
+import os
+import sys
+
+# always prefer Pygments from source if it exists
+srcpath = os.path.join(os.path.dirname(__file__), "..")
+if os.path.isdir(os.path.join(srcpath, "pygments")):
+ sys.path.insert(0, srcpath)
+
+import tests.contrast.test_contrasts
+
+tests.contrast.test_contrasts.test_contrasts(fail_if_improved=False)
+tests.contrast.test_contrasts.update_json()
diff --git a/scripts/utility.py b/scripts/utility.py
new file mode 100644
index 0000000..4d59a1b
--- /dev/null
+++ b/scripts/utility.py
@@ -0,0 +1,69 @@
+"""
+ Utility functions for test scripts
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import os.path
+
+
+def unpack_output_file(path):
+ """
+    Unpack an output file into objects containing the line number, the text,
+ and the token name. The output file can be either a ``.output`` file
+ containing a token stream, or a ``.txt`` with input and tokens.
+ """
+ from collections import namedtuple
+ entry = namedtuple('OutputEntry', ['text', 'token', 'linenumber'])
+
+ skip_until_tokens = path.endswith('.txt')
+
+ for linenumber, line in enumerate(open(path).readlines()):
+ line = line.strip()
+ if not line:
+ continue
+
+ if skip_until_tokens:
+ if line != '---tokens---':
+ continue
+ else:
+ skip_until_tokens = False
+ continue
+
+ # Line can start with ' or ", so let's check which one it is
+ # and find the matching one
+ quotation_start = 0
+ quotation_end = line.rfind(line[0])
+ text = line[quotation_start+1:quotation_end]
+ token = line.split()[-1]
+ text = text.replace('\\n', '\n')
+ text = text.replace('\\t', '\t')
+ yield entry(text, token, linenumber + 1)
+
+
+def process_output_files(root_directory, callback):
+ """
+    Process all output files (i.e. ``.output`` files, and ``.txt`` files for
+    snippets) in a directory tree using the provided callback.
+ The callback should return ``True`` in case of success, ``False``
+ otherwise.
+
+ The function returns the number of files for which the callback returned
+ ``False``.
+ """
+ errors = 0
+ for dir, _, files in os.walk(root_directory):
+ for file in files:
+ _, ext = os.path.splitext(file)
+
+ if ext not in {'.txt', '.output'}:
+ continue
+
+ path = os.path.join(dir, file)
+ if not callback(path):
+ errors += 1
+
+ return errors
diff --git a/scripts/vim2pygments.py b/scripts/vim2pygments.py
new file mode 100755
index 0000000..ec9b63b
--- /dev/null
+++ b/scripts/vim2pygments.py
@@ -0,0 +1,932 @@
+#!/usr/bin/env python
+"""
+ Vim Colorscheme Converter
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This script converts vim colorscheme files to valid pygments
+ style classes meant for putting into modules.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import re
+from os import path
+from io import StringIO
+
+split_re = re.compile(r'(?<!\\)\s+')
+
+SCRIPT_NAME = 'Vim Colorscheme Converter'
+SCRIPT_VERSION = '0.1'
+
+
+COLORS = {
+ # Numeric Colors
+ '0': '#000000',
+ '1': '#c00000',
+ '2': '#008000',
+ '3': '#808000',
+ '4': '#0000c0',
+ '5': '#c000c0',
+ '6': '#008080',
+ '7': '#c0c0c0',
+ '8': '#808080',
+ '9': '#ff6060',
+ '10': '#00ff00',
+ '11': '#ffff00',
+ '12': '#8080ff',
+ '13': '#ff40ff',
+ '14': '#00ffff',
+ '15': '#ffffff',
+ # Named Colors
+ 'alice': '#f0f8ff',
+ 'aliceblue': '#f0f8ff',
+ 'antique': '#faebd7',
+ 'antiquewhite': '#faebd7',
+ 'antiquewhite1': '#ffefdb',
+ 'antiquewhite2': '#eedfcc',
+ 'antiquewhite3': '#cdc0b0',
+ 'antiquewhite4': '#8b8378',
+ 'aquamarine': '#7fffd4',
+ 'aquamarine1': '#7fffd4',
+ 'aquamarine2': '#76eec6',
+ 'aquamarine3': '#66cdaa',
+ 'aquamarine4': '#458b74',
+ 'azure': '#f0ffff',
+ 'azure1': '#f0ffff',
+ 'azure2': '#e0eeee',
+ 'azure3': '#c1cdcd',
+ 'azure4': '#838b8b',
+ 'beige': '#f5f5dc',
+ 'bisque': '#ffe4c4',
+ 'bisque1': '#ffe4c4',
+ 'bisque2': '#eed5b7',
+ 'bisque3': '#cdb79e',
+ 'bisque4': '#8b7d6b',
+ 'black': '#000000',
+ 'blanched': '#ffebcd',
+ 'blanchedalmond': '#ffebcd',
+ 'blue': '#8a2be2',
+ 'blue1': '#0000ff',
+ 'blue2': '#0000ee',
+ 'blue3': '#0000cd',
+ 'blue4': '#00008b',
+ 'blueviolet': '#8a2be2',
+ 'brown': '#a52a2a',
+ 'brown1': '#ff4040',
+ 'brown2': '#ee3b3b',
+ 'brown3': '#cd3333',
+ 'brown4': '#8b2323',
+ 'burlywood': '#deb887',
+ 'burlywood1': '#ffd39b',
+ 'burlywood2': '#eec591',
+ 'burlywood3': '#cdaa7d',
+ 'burlywood4': '#8b7355',
+ 'cadet': '#5f9ea0',
+ 'cadetblue': '#5f9ea0',
+ 'cadetblue1': '#98f5ff',
+ 'cadetblue2': '#8ee5ee',
+ 'cadetblue3': '#7ac5cd',
+ 'cadetblue4': '#53868b',
+ 'chartreuse': '#7fff00',
+ 'chartreuse1': '#7fff00',
+ 'chartreuse2': '#76ee00',
+ 'chartreuse3': '#66cd00',
+ 'chartreuse4': '#458b00',
+ 'chocolate': '#d2691e',
+ 'chocolate1': '#ff7f24',
+ 'chocolate2': '#ee7621',
+ 'chocolate3': '#cd661d',
+ 'chocolate4': '#8b4513',
+ 'coral': '#ff7f50',
+ 'coral1': '#ff7256',
+ 'coral2': '#ee6a50',
+ 'coral3': '#cd5b45',
+ 'coral4': '#8b3e2f',
+ 'cornflower': '#6495ed',
+ 'cornflowerblue': '#6495ed',
+ 'cornsilk': '#fff8dc',
+ 'cornsilk1': '#fff8dc',
+ 'cornsilk2': '#eee8cd',
+ 'cornsilk3': '#cdc8b1',
+ 'cornsilk4': '#8b8878',
+ 'cyan': '#00ffff',
+ 'cyan1': '#00ffff',
+ 'cyan2': '#00eeee',
+ 'cyan3': '#00cdcd',
+ 'cyan4': '#008b8b',
+ 'dark': '#8b0000',
+ 'darkblue': '#00008b',
+ 'darkcyan': '#008b8b',
+ 'darkgoldenrod': '#b8860b',
+ 'darkgoldenrod1': '#ffb90f',
+ 'darkgoldenrod2': '#eead0e',
+ 'darkgoldenrod3': '#cd950c',
+ 'darkgoldenrod4': '#8b6508',
+ 'darkgray': '#a9a9a9',
+ 'darkgreen': '#006400',
+ 'darkgrey': '#a9a9a9',
+ 'darkkhaki': '#bdb76b',
+ 'darkmagenta': '#8b008b',
+ 'darkolivegreen': '#556b2f',
+ 'darkolivegreen1': '#caff70',
+ 'darkolivegreen2': '#bcee68',
+ 'darkolivegreen3': '#a2cd5a',
+ 'darkolivegreen4': '#6e8b3d',
+ 'darkorange': '#ff8c00',
+ 'darkorange1': '#ff7f00',
+ 'darkorange2': '#ee7600',
+ 'darkorange3': '#cd6600',
+ 'darkorange4': '#8b4500',
+ 'darkorchid': '#9932cc',
+ 'darkorchid1': '#bf3eff',
+ 'darkorchid2': '#b23aee',
+ 'darkorchid3': '#9a32cd',
+ 'darkorchid4': '#68228b',
+ 'darkred': '#8b0000',
+ 'darksalmon': '#e9967a',
+ 'darkseagreen': '#8fbc8f',
+ 'darkseagreen1': '#c1ffc1',
+ 'darkseagreen2': '#b4eeb4',
+ 'darkseagreen3': '#9bcd9b',
+ 'darkseagreen4': '#698b69',
+ 'darkslateblue': '#483d8b',
+ 'darkslategray': '#2f4f4f',
+ 'darkslategray1': '#97ffff',
+ 'darkslategray2': '#8deeee',
+ 'darkslategray3': '#79cdcd',
+ 'darkslategray4': '#528b8b',
+ 'darkslategrey': '#2f4f4f',
+ 'darkturquoise': '#00ced1',
+ 'darkviolet': '#9400d3',
+ 'deep': '#ff1493',
+ 'deeppink': '#ff1493',
+ 'deeppink1': '#ff1493',
+ 'deeppink2': '#ee1289',
+ 'deeppink3': '#cd1076',
+ 'deeppink4': '#8b0a50',
+ 'deepskyblue': '#00bfff',
+ 'deepskyblue1': '#00bfff',
+ 'deepskyblue2': '#00b2ee',
+ 'deepskyblue3': '#009acd',
+ 'deepskyblue4': '#00688b',
+ 'dim': '#696969',
+ 'dimgray': '#696969',
+ 'dimgrey': '#696969',
+ 'dodger': '#1e90ff',
+ 'dodgerblue': '#1e90ff',
+ 'dodgerblue1': '#1e90ff',
+ 'dodgerblue2': '#1c86ee',
+ 'dodgerblue3': '#1874cd',
+ 'dodgerblue4': '#104e8b',
+ 'firebrick': '#b22222',
+ 'firebrick1': '#ff3030',
+ 'firebrick2': '#ee2c2c',
+ 'firebrick3': '#cd2626',
+ 'firebrick4': '#8b1a1a',
+ 'floral': '#fffaf0',
+ 'floralwhite': '#fffaf0',
+ 'forest': '#228b22',
+ 'forestgreen': '#228b22',
+ 'gainsboro': '#dcdcdc',
+ 'ghost': '#f8f8ff',
+ 'ghostwhite': '#f8f8ff',
+ 'gold': '#ffd700',
+ 'gold1': '#ffd700',
+ 'gold2': '#eec900',
+ 'gold3': '#cdad00',
+ 'gold4': '#8b7500',
+ 'goldenrod': '#daa520',
+ 'goldenrod1': '#ffc125',
+ 'goldenrod2': '#eeb422',
+ 'goldenrod3': '#cd9b1d',
+ 'goldenrod4': '#8b6914',
+ 'gray': '#bebebe',
+ 'gray0': '#000000',
+ 'gray1': '#030303',
+ 'gray10': '#1a1a1a',
+ 'gray100': '#ffffff',
+ 'gray11': '#1c1c1c',
+ 'gray12': '#1f1f1f',
+ 'gray13': '#212121',
+ 'gray14': '#242424',
+ 'gray15': '#262626',
+ 'gray16': '#292929',
+ 'gray17': '#2b2b2b',
+ 'gray18': '#2e2e2e',
+ 'gray19': '#303030',
+ 'gray2': '#050505',
+ 'gray20': '#333333',
+ 'gray21': '#363636',
+ 'gray22': '#383838',
+ 'gray23': '#3b3b3b',
+ 'gray24': '#3d3d3d',
+ 'gray25': '#404040',
+ 'gray26': '#424242',
+ 'gray27': '#454545',
+ 'gray28': '#474747',
+ 'gray29': '#4a4a4a',
+ 'gray3': '#080808',
+ 'gray30': '#4d4d4d',
+ 'gray31': '#4f4f4f',
+ 'gray32': '#525252',
+ 'gray33': '#545454',
+ 'gray34': '#575757',
+ 'gray35': '#595959',
+ 'gray36': '#5c5c5c',
+ 'gray37': '#5e5e5e',
+ 'gray38': '#616161',
+ 'gray39': '#636363',
+ 'gray4': '#0a0a0a',
+ 'gray40': '#666666',
+ 'gray41': '#696969',
+ 'gray42': '#6b6b6b',
+ 'gray43': '#6e6e6e',
+ 'gray44': '#707070',
+ 'gray45': '#737373',
+ 'gray46': '#757575',
+ 'gray47': '#787878',
+ 'gray48': '#7a7a7a',
+ 'gray49': '#7d7d7d',
+ 'gray5': '#0d0d0d',
+ 'gray50': '#7f7f7f',
+ 'gray51': '#828282',
+ 'gray52': '#858585',
+ 'gray53': '#878787',
+ 'gray54': '#8a8a8a',
+ 'gray55': '#8c8c8c',
+ 'gray56': '#8f8f8f',
+ 'gray57': '#919191',
+ 'gray58': '#949494',
+ 'gray59': '#969696',
+ 'gray6': '#0f0f0f',
+ 'gray60': '#999999',
+ 'gray61': '#9c9c9c',
+ 'gray62': '#9e9e9e',
+ 'gray63': '#a1a1a1',
+ 'gray64': '#a3a3a3',
+ 'gray65': '#a6a6a6',
+ 'gray66': '#a8a8a8',
+ 'gray67': '#ababab',
+ 'gray68': '#adadad',
+ 'gray69': '#b0b0b0',
+ 'gray7': '#121212',
+ 'gray70': '#b3b3b3',
+ 'gray71': '#b5b5b5',
+ 'gray72': '#b8b8b8',
+ 'gray73': '#bababa',
+ 'gray74': '#bdbdbd',
+ 'gray75': '#bfbfbf',
+ 'gray76': '#c2c2c2',
+ 'gray77': '#c4c4c4',
+ 'gray78': '#c7c7c7',
+ 'gray79': '#c9c9c9',
+ 'gray8': '#141414',
+ 'gray80': '#cccccc',
+ 'gray81': '#cfcfcf',
+ 'gray82': '#d1d1d1',
+ 'gray83': '#d4d4d4',
+ 'gray84': '#d6d6d6',
+ 'gray85': '#d9d9d9',
+ 'gray86': '#dbdbdb',
+ 'gray87': '#dedede',
+ 'gray88': '#e0e0e0',
+ 'gray89': '#e3e3e3',
+ 'gray9': '#171717',
+ 'gray90': '#e5e5e5',
+ 'gray91': '#e8e8e8',
+ 'gray92': '#ebebeb',
+ 'gray93': '#ededed',
+ 'gray94': '#f0f0f0',
+ 'gray95': '#f2f2f2',
+ 'gray96': '#f5f5f5',
+ 'gray97': '#f7f7f7',
+ 'gray98': '#fafafa',
+ 'gray99': '#fcfcfc',
+ 'green': '#adff2f',
+ 'green1': '#00ff00',
+ 'green2': '#00ee00',
+ 'green3': '#00cd00',
+ 'green4': '#008b00',
+ 'greenyellow': '#adff2f',
+ 'grey': '#bebebe',
+ 'grey0': '#000000',
+ 'grey1': '#030303',
+ 'grey10': '#1a1a1a',
+ 'grey100': '#ffffff',
+ 'grey11': '#1c1c1c',
+ 'grey12': '#1f1f1f',
+ 'grey13': '#212121',
+ 'grey14': '#242424',
+ 'grey15': '#262626',
+ 'grey16': '#292929',
+ 'grey17': '#2b2b2b',
+ 'grey18': '#2e2e2e',
+ 'grey19': '#303030',
+ 'grey2': '#050505',
+ 'grey20': '#333333',
+ 'grey21': '#363636',
+ 'grey22': '#383838',
+ 'grey23': '#3b3b3b',
+ 'grey24': '#3d3d3d',
+ 'grey25': '#404040',
+ 'grey26': '#424242',
+ 'grey27': '#454545',
+ 'grey28': '#474747',
+ 'grey29': '#4a4a4a',
+ 'grey3': '#080808',
+ 'grey30': '#4d4d4d',
+ 'grey31': '#4f4f4f',
+ 'grey32': '#525252',
+ 'grey33': '#545454',
+ 'grey34': '#575757',
+ 'grey35': '#595959',
+ 'grey36': '#5c5c5c',
+ 'grey37': '#5e5e5e',
+ 'grey38': '#616161',
+ 'grey39': '#636363',
+ 'grey4': '#0a0a0a',
+ 'grey40': '#666666',
+ 'grey41': '#696969',
+ 'grey42': '#6b6b6b',
+ 'grey43': '#6e6e6e',
+ 'grey44': '#707070',
+ 'grey45': '#737373',
+ 'grey46': '#757575',
+ 'grey47': '#787878',
+ 'grey48': '#7a7a7a',
+ 'grey49': '#7d7d7d',
+ 'grey5': '#0d0d0d',
+ 'grey50': '#7f7f7f',
+ 'grey51': '#828282',
+ 'grey52': '#858585',
+ 'grey53': '#878787',
+ 'grey54': '#8a8a8a',
+ 'grey55': '#8c8c8c',
+ 'grey56': '#8f8f8f',
+ 'grey57': '#919191',
+ 'grey58': '#949494',
+ 'grey59': '#969696',
+ 'grey6': '#0f0f0f',
+ 'grey60': '#999999',
+ 'grey61': '#9c9c9c',
+ 'grey62': '#9e9e9e',
+ 'grey63': '#a1a1a1',
+ 'grey64': '#a3a3a3',
+ 'grey65': '#a6a6a6',
+ 'grey66': '#a8a8a8',
+ 'grey67': '#ababab',
+ 'grey68': '#adadad',
+ 'grey69': '#b0b0b0',
+ 'grey7': '#121212',
+ 'grey70': '#b3b3b3',
+ 'grey71': '#b5b5b5',
+ 'grey72': '#b8b8b8',
+ 'grey73': '#bababa',
+ 'grey74': '#bdbdbd',
+ 'grey75': '#bfbfbf',
+ 'grey76': '#c2c2c2',
+ 'grey77': '#c4c4c4',
+ 'grey78': '#c7c7c7',
+ 'grey79': '#c9c9c9',
+ 'grey8': '#141414',
+ 'grey80': '#cccccc',
+ 'grey81': '#cfcfcf',
+ 'grey82': '#d1d1d1',
+ 'grey83': '#d4d4d4',
+ 'grey84': '#d6d6d6',
+ 'grey85': '#d9d9d9',
+ 'grey86': '#dbdbdb',
+ 'grey87': '#dedede',
+ 'grey88': '#e0e0e0',
+ 'grey89': '#e3e3e3',
+ 'grey9': '#171717',
+ 'grey90': '#e5e5e5',
+ 'grey91': '#e8e8e8',
+ 'grey92': '#ebebeb',
+ 'grey93': '#ededed',
+ 'grey94': '#f0f0f0',
+ 'grey95': '#f2f2f2',
+ 'grey96': '#f5f5f5',
+ 'grey97': '#f7f7f7',
+ 'grey98': '#fafafa',
+ 'grey99': '#fcfcfc',
+ 'honeydew': '#f0fff0',
+ 'honeydew1': '#f0fff0',
+ 'honeydew2': '#e0eee0',
+ 'honeydew3': '#c1cdc1',
+ 'honeydew4': '#838b83',
+ 'hot': '#ff69b4',
+ 'hotpink': '#ff69b4',
+ 'hotpink1': '#ff6eb4',
+ 'hotpink2': '#ee6aa7',
+ 'hotpink3': '#cd6090',
+ 'hotpink4': '#8b3a62',
+ 'indian': '#cd5c5c',
+ 'indianred': '#cd5c5c',
+ 'indianred1': '#ff6a6a',
+ 'indianred2': '#ee6363',
+ 'indianred3': '#cd5555',
+ 'indianred4': '#8b3a3a',
+ 'ivory': '#fffff0',
+ 'ivory1': '#fffff0',
+ 'ivory2': '#eeeee0',
+ 'ivory3': '#cdcdc1',
+ 'ivory4': '#8b8b83',
+ 'khaki': '#f0e68c',
+ 'khaki1': '#fff68f',
+ 'khaki2': '#eee685',
+ 'khaki3': '#cdc673',
+ 'khaki4': '#8b864e',
+ 'lavender': '#fff0f5',
+ 'lavenderblush': '#fff0f5',
+ 'lavenderblush1': '#fff0f5',
+ 'lavenderblush2': '#eee0e5',
+ 'lavenderblush3': '#cdc1c5',
+ 'lavenderblush4': '#8b8386',
+ 'lawn': '#7cfc00',
+ 'lawngreen': '#7cfc00',
+ 'lemon': '#fffacd',
+ 'lemonchiffon': '#fffacd',
+ 'lemonchiffon1': '#fffacd',
+ 'lemonchiffon2': '#eee9bf',
+ 'lemonchiffon3': '#cdc9a5',
+ 'lemonchiffon4': '#8b8970',
+ 'light': '#90ee90',
+ 'lightblue': '#add8e6',
+ 'lightblue1': '#bfefff',
+ 'lightblue2': '#b2dfee',
+ 'lightblue3': '#9ac0cd',
+ 'lightblue4': '#68838b',
+ 'lightcoral': '#f08080',
+ 'lightcyan': '#e0ffff',
+ 'lightcyan1': '#e0ffff',
+ 'lightcyan2': '#d1eeee',
+ 'lightcyan3': '#b4cdcd',
+ 'lightcyan4': '#7a8b8b',
+ 'lightgoldenrod': '#eedd82',
+ 'lightgoldenrod1': '#ffec8b',
+ 'lightgoldenrod2': '#eedc82',
+ 'lightgoldenrod3': '#cdbe70',
+ 'lightgoldenrod4': '#8b814c',
+ 'lightgoldenrodyellow': '#fafad2',
+ 'lightgray': '#d3d3d3',
+ 'lightgreen': '#90ee90',
+ 'lightgrey': '#d3d3d3',
+ 'lightpink': '#ffb6c1',
+ 'lightpink1': '#ffaeb9',
+ 'lightpink2': '#eea2ad',
+ 'lightpink3': '#cd8c95',
+ 'lightpink4': '#8b5f65',
+ 'lightsalmon': '#ffa07a',
+ 'lightsalmon1': '#ffa07a',
+ 'lightsalmon2': '#ee9572',
+ 'lightsalmon3': '#cd8162',
+ 'lightsalmon4': '#8b5742',
+ 'lightseagreen': '#20b2aa',
+ 'lightskyblue': '#87cefa',
+ 'lightskyblue1': '#b0e2ff',
+ 'lightskyblue2': '#a4d3ee',
+ 'lightskyblue3': '#8db6cd',
+ 'lightskyblue4': '#607b8b',
+ 'lightslateblue': '#8470ff',
+ 'lightslategray': '#778899',
+ 'lightslategrey': '#778899',
+ 'lightsteelblue': '#b0c4de',
+ 'lightsteelblue1': '#cae1ff',
+ 'lightsteelblue2': '#bcd2ee',
+ 'lightsteelblue3': '#a2b5cd',
+ 'lightsteelblue4': '#6e7b8b',
+ 'lightyellow': '#ffffe0',
+ 'lightyellow1': '#ffffe0',
+ 'lightyellow2': '#eeeed1',
+ 'lightyellow3': '#cdcdb4',
+ 'lightyellow4': '#8b8b7a',
+ 'lime': '#32cd32',
+ 'limegreen': '#32cd32',
+ 'linen': '#faf0e6',
+ 'magenta': '#ff00ff',
+ 'magenta1': '#ff00ff',
+ 'magenta2': '#ee00ee',
+ 'magenta3': '#cd00cd',
+ 'magenta4': '#8b008b',
+ 'maroon': '#b03060',
+ 'maroon1': '#ff34b3',
+ 'maroon2': '#ee30a7',
+ 'maroon3': '#cd2990',
+ 'maroon4': '#8b1c62',
+ 'medium': '#9370db',
+ 'mediumaquamarine': '#66cdaa',
+ 'mediumblue': '#0000cd',
+ 'mediumorchid': '#ba55d3',
+ 'mediumorchid1': '#e066ff',
+ 'mediumorchid2': '#d15fee',
+ 'mediumorchid3': '#b452cd',
+ 'mediumorchid4': '#7a378b',
+ 'mediumpurple': '#9370db',
+ 'mediumpurple1': '#ab82ff',
+ 'mediumpurple2': '#9f79ee',
+ 'mediumpurple3': '#8968cd',
+ 'mediumpurple4': '#5d478b',
+ 'mediumseagreen': '#3cb371',
+ 'mediumslateblue': '#7b68ee',
+ 'mediumspringgreen': '#00fa9a',
+ 'mediumturquoise': '#48d1cc',
+ 'mediumvioletred': '#c71585',
+ 'midnight': '#191970',
+ 'midnightblue': '#191970',
+ 'mint': '#f5fffa',
+ 'mintcream': '#f5fffa',
+ 'misty': '#ffe4e1',
+ 'mistyrose': '#ffe4e1',
+ 'mistyrose1': '#ffe4e1',
+ 'mistyrose2': '#eed5d2',
+ 'mistyrose3': '#cdb7b5',
+ 'mistyrose4': '#8b7d7b',
+ 'moccasin': '#ffe4b5',
+ 'navajo': '#ffdead',
+ 'navajowhite': '#ffdead',
+ 'navajowhite1': '#ffdead',
+ 'navajowhite2': '#eecfa1',
+ 'navajowhite3': '#cdb38b',
+ 'navajowhite4': '#8b795e',
+ 'navy': '#000080',
+ 'navyblue': '#000080',
+ 'old': '#fdf5e6',
+ 'oldlace': '#fdf5e6',
+ 'olive': '#6b8e23',
+ 'olivedrab': '#6b8e23',
+ 'olivedrab1': '#c0ff3e',
+ 'olivedrab2': '#b3ee3a',
+ 'olivedrab3': '#9acd32',
+ 'olivedrab4': '#698b22',
+ 'orange': '#ff4500',
+ 'orange1': '#ffa500',
+ 'orange2': '#ee9a00',
+ 'orange3': '#cd8500',
+ 'orange4': '#8b5a00',
+ 'orangered': '#ff4500',
+ 'orangered1': '#ff4500',
+ 'orangered2': '#ee4000',
+ 'orangered3': '#cd3700',
+ 'orangered4': '#8b2500',
+ 'orchid': '#da70d6',
+ 'orchid1': '#ff83fa',
+ 'orchid2': '#ee7ae9',
+ 'orchid3': '#cd69c9',
+ 'orchid4': '#8b4789',
+ 'pale': '#db7093',
+ 'palegoldenrod': '#eee8aa',
+ 'palegreen': '#98fb98',
+ 'palegreen1': '#9aff9a',
+ 'palegreen2': '#90ee90',
+ 'palegreen3': '#7ccd7c',
+ 'palegreen4': '#548b54',
+ 'paleturquoise': '#afeeee',
+ 'paleturquoise1': '#bbffff',
+ 'paleturquoise2': '#aeeeee',
+ 'paleturquoise3': '#96cdcd',
+ 'paleturquoise4': '#668b8b',
+ 'palevioletred': '#db7093',
+ 'palevioletred1': '#ff82ab',
+ 'palevioletred2': '#ee799f',
+ 'palevioletred3': '#cd6889',
+ 'palevioletred4': '#8b475d',
+ 'papaya': '#ffefd5',
+ 'papayawhip': '#ffefd5',
+ 'peach': '#ffdab9',
+ 'peachpuff': '#ffdab9',
+ 'peachpuff1': '#ffdab9',
+ 'peachpuff2': '#eecbad',
+ 'peachpuff3': '#cdaf95',
+ 'peachpuff4': '#8b7765',
+ 'peru': '#cd853f',
+ 'pink': '#ffc0cb',
+ 'pink1': '#ffb5c5',
+ 'pink2': '#eea9b8',
+ 'pink3': '#cd919e',
+ 'pink4': '#8b636c',
+ 'plum': '#dda0dd',
+ 'plum1': '#ffbbff',
+ 'plum2': '#eeaeee',
+ 'plum3': '#cd96cd',
+ 'plum4': '#8b668b',
+ 'powder': '#b0e0e6',
+ 'powderblue': '#b0e0e6',
+ 'purple': '#a020f0',
+ 'purple1': '#9b30ff',
+ 'purple2': '#912cee',
+ 'purple3': '#7d26cd',
+ 'purple4': '#551a8b',
+ 'red': '#ff0000',
+ 'red1': '#ff0000',
+ 'red2': '#ee0000',
+ 'red3': '#cd0000',
+ 'red4': '#8b0000',
+ 'rosy': '#bc8f8f',
+ 'rosybrown': '#bc8f8f',
+ 'rosybrown1': '#ffc1c1',
+ 'rosybrown2': '#eeb4b4',
+ 'rosybrown3': '#cd9b9b',
+ 'rosybrown4': '#8b6969',
+ 'royal': '#4169e1',
+ 'royalblue': '#4169e1',
+ 'royalblue1': '#4876ff',
+ 'royalblue2': '#436eee',
+ 'royalblue3': '#3a5fcd',
+ 'royalblue4': '#27408b',
+ 'saddle': '#8b4513',
+ 'saddlebrown': '#8b4513',
+ 'salmon': '#fa8072',
+ 'salmon1': '#ff8c69',
+ 'salmon2': '#ee8262',
+ 'salmon3': '#cd7054',
+ 'salmon4': '#8b4c39',
+ 'sandy': '#f4a460',
+ 'sandybrown': '#f4a460',
+ 'sea': '#2e8b57',
+ 'seagreen': '#2e8b57',
+ 'seagreen1': '#54ff9f',
+ 'seagreen2': '#4eee94',
+ 'seagreen3': '#43cd80',
+ 'seagreen4': '#2e8b57',
+ 'seashell': '#fff5ee',
+ 'seashell1': '#fff5ee',
+ 'seashell2': '#eee5de',
+ 'seashell3': '#cdc5bf',
+ 'seashell4': '#8b8682',
+ 'sienna': '#a0522d',
+ 'sienna1': '#ff8247',
+ 'sienna2': '#ee7942',
+ 'sienna3': '#cd6839',
+ 'sienna4': '#8b4726',
+ 'sky': '#87ceeb',
+ 'skyblue': '#87ceeb',
+ 'skyblue1': '#87ceff',
+ 'skyblue2': '#7ec0ee',
+ 'skyblue3': '#6ca6cd',
+ 'skyblue4': '#4a708b',
+ 'slate': '#6a5acd',
+ 'slateblue': '#6a5acd',
+ 'slateblue1': '#836fff',
+ 'slateblue2': '#7a67ee',
+ 'slateblue3': '#6959cd',
+ 'slateblue4': '#473c8b',
+ 'slategray': '#708090',
+ 'slategray1': '#c6e2ff',
+ 'slategray2': '#b9d3ee',
+ 'slategray3': '#9fb6cd',
+ 'slategray4': '#6c7b8b',
+ 'slategrey': '#708090',
+ 'snow': '#fffafa',
+ 'snow1': '#fffafa',
+ 'snow2': '#eee9e9',
+ 'snow3': '#cdc9c9',
+ 'snow4': '#8b8989',
+ 'spring': '#00ff7f',
+ 'springgreen': '#00ff7f',
+ 'springgreen1': '#00ff7f',
+ 'springgreen2': '#00ee76',
+ 'springgreen3': '#00cd66',
+ 'springgreen4': '#008b45',
+ 'steel': '#4682b4',
+ 'steelblue': '#4682b4',
+ 'steelblue1': '#63b8ff',
+ 'steelblue2': '#5cacee',
+ 'steelblue3': '#4f94cd',
+ 'steelblue4': '#36648b',
+ 'tan': '#d2b48c',
+ 'tan1': '#ffa54f',
+ 'tan2': '#ee9a49',
+ 'tan3': '#cd853f',
+ 'tan4': '#8b5a2b',
+ 'thistle': '#d8bfd8',
+ 'thistle1': '#ffe1ff',
+ 'thistle2': '#eed2ee',
+ 'thistle3': '#cdb5cd',
+ 'thistle4': '#8b7b8b',
+ 'tomato': '#ff6347',
+ 'tomato1': '#ff6347',
+ 'tomato2': '#ee5c42',
+ 'tomato3': '#cd4f39',
+ 'tomato4': '#8b3626',
+ 'turquoise': '#40e0d0',
+ 'turquoise1': '#00f5ff',
+ 'turquoise2': '#00e5ee',
+ 'turquoise3': '#00c5cd',
+ 'turquoise4': '#00868b',
+ 'violet': '#ee82ee',
+ 'violetred': '#d02090',
+ 'violetred1': '#ff3e96',
+ 'violetred2': '#ee3a8c',
+ 'violetred3': '#cd3278',
+ 'violetred4': '#8b2252',
+ 'wheat': '#f5deb3',
+ 'wheat1': '#ffe7ba',
+ 'wheat2': '#eed8ae',
+ 'wheat3': '#cdba96',
+ 'wheat4': '#8b7e66',
+ 'white': '#ffffff',
+ 'whitesmoke': '#f5f5f5',
+ 'yellow': '#ffff00',
+ 'yellow1': '#ffff00',
+ 'yellow2': '#eeee00',
+ 'yellow3': '#cdcd00',
+ 'yellow4': '#8b8b00',
+ 'yellowgreen': '#9acd32'
+}
+
+TOKENS = {
+ 'normal': '',
+ 'string': 'String',
+ 'number': 'Number',
+ 'float': 'Number.Float',
+ 'constant': 'Name.Constant',
+ 'number': 'Number',
+ 'statement': ('Keyword', 'Name.Tag'),
+ 'identifier': 'Name.Variable',
+ 'operator': 'Operator.Word',
+ 'label': 'Name.Label',
+ 'exception': 'Name.Exception',
+ 'function': ('Name.Function', 'Name.Attribute'),
+ 'preproc': 'Comment.Preproc',
+ 'comment': 'Comment',
+ 'type': 'Keyword.Type',
+ 'diffadd': 'Generic.Inserted',
+ 'diffdelete': 'Generic.Deleted',
+ 'error': 'Generic.Error',
+ 'errormsg': 'Generic.Traceback',
+ 'title': ('Generic.Heading', 'Generic.Subheading'),
+ 'underlined': 'Generic.Emph',
+ 'special': 'Name.Entity',
+ 'nontext': 'Generic.Output'
+}
+
+TOKEN_TYPES = set()
+for token in TOKENS.values():
+ if not isinstance(token, tuple):
+ token = (token,)
+ for token in token:
+ if token:
+ TOKEN_TYPES.add(token.split('.')[0])
+
+
+def get_vim_color(color):
+ if color.startswith('#'):
+ if len(color) == 7:
+ return color
+ else:
+ return '#%s0' % '0'.join(color)[1:]
+ return COLORS.get(color.lower())
+
+
+def find_colors(code):
+ colors = {'Normal': {}}
+ bg_color = None
+ def set(attrib, value):
+ if token not in colors:
+ colors[token] = {}
+ if key.startswith('gui') or attrib not in colors[token]:
+ colors[token][attrib] = value
+
+ for line in code.splitlines():
+ if line.startswith('"'):
+ continue
+ parts = split_re.split(line.strip())
+ if len(parts) == 2 and parts[0] == 'set':
+ p = parts[1].split()
+ if p[0] == 'background' and p[1] == 'dark':
+ token = 'Normal'
+ bg_color = '#000000'
+ elif len(parts) > 2 and \
+ len(parts[0]) >= 2 and \
+ 'highlight'.startswith(parts[0]):
+ token = parts[1].lower()
+ if token not in TOKENS:
+ continue
+ for item in parts[2:]:
+ p = item.split('=', 1)
+ if not len(p) == 2:
+ continue
+ key, value = p
+ if key in ('ctermfg', 'guifg'):
+ color = get_vim_color(value)
+ if color:
+ set('color', color)
+ elif key in ('ctermbg', 'guibg'):
+ color = get_vim_color(value)
+ if color:
+ set('bgcolor', color)
+ elif key in ('term', 'cterm', 'gui'):
+ items = value.split(',')
+ for item in items:
+ item = item.lower()
+ if item == 'none':
+ set('noinherit', True)
+ elif item == 'bold':
+ set('bold', True)
+ elif item == 'underline':
+ set('underline', True)
+ elif item == 'italic':
+ set('italic', True)
+
+ if bg_color is not None and not colors['Normal'].get('bgcolor'):
+ colors['Normal']['bgcolor'] = bg_color
+
+ color_map = {}
+ for token, styles in colors.items():
+ if token in TOKENS:
+ tmp = []
+ if styles.get('noinherit'):
+ tmp.append('noinherit')
+ if 'color' in styles:
+ tmp.append(styles['color'])
+ if 'bgcolor' in styles:
+ tmp.append('bg:' + styles['bgcolor'])
+ if styles.get('bold'):
+ tmp.append('bold')
+ if styles.get('italic'):
+ tmp.append('italic')
+ if styles.get('underline'):
+ tmp.append('underline')
+ tokens = TOKENS[token]
+ if not isinstance(tokens, tuple):
+ tokens = (tokens,)
+ for token in tokens:
+ color_map[token] = ' '.join(tmp)
+
+ default_token = color_map.pop('')
+ return default_token, color_map
+
+
+class StyleWriter:
+
+ def __init__(self, code, name):
+ self.code = code
+ self.name = name.lower()
+
+ def write_header(self, out):
+ out.write('# -*- coding: utf-8 -*-\n"""\n')
+ out.write(' %s Colorscheme\n' % self.name.title())
+ out.write(' %s\n\n' % ('~' * (len(self.name) + 12)))
+ out.write(' Converted by %s\n' % SCRIPT_NAME)
+ out.write('"""\nfrom pygments.style import Style\n')
+ out.write('from pygments.token import Token, %s\n\n' % ', '.join(TOKEN_TYPES))
+ out.write('class %sStyle(Style):\n\n' % self.name.title())
+
+ def write(self, out):
+ self.write_header(out)
+ default_token, tokens = find_colors(self.code)
+ tokens = list(tokens.items())
+ tokens.sort(lambda a, b: cmp(len(a[0]), len(a[1])))
+ bg_color = [x[3:] for x in default_token.split() if x.startswith('bg:')]
+ if bg_color:
+ out.write(' background_color = %r\n' % bg_color[0])
+ out.write(' styles = {\n')
+ out.write(' %-20s%r,\n' % ('Token:', default_token))
+ for token, definition in tokens:
+ if definition:
+ out.write(' %-20s%r,\n' % (token + ':', definition))
+ out.write(' }')
+
+ def __repr__(self):
+ out = StringIO()
+ self.write_style(out)
+ return out.getvalue()
+
+
+def convert(filename, stream=None):
+ name = path.basename(filename)
+ if name.endswith('.vim'):
+ name = name[:-4]
+ f = file(filename)
+ code = f.read()
+ f.close()
+ writer = StyleWriter(code, name)
+ if stream is not None:
+ out = stream
+ else:
+ out = StringIO()
+ writer.write(out)
+ if stream is None:
+ return out.getvalue()
+
+
+def main():
+ if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
+ print('Usage: %s <filename.vim>' % sys.argv[0])
+ return 2
+ if sys.argv[1] in ('-v', '--version'):
+ print('%s %s' % (SCRIPT_NAME, SCRIPT_VERSION))
+ return
+ filename = sys.argv[1]
+ if not (path.exists(filename) and path.isfile(filename)):
+ print('Error: %s not found' % filename)
+ return 1
+ convert(filename, sys.stdout)
+ sys.stdout.write('\n')
+
+
+if __name__ == '__main__':
+ sys.exit(main() or 0)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..21304dc
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,54 @@
+[metadata]
+name = Pygments
+version = attr: pygments.__version__
+url = https://pygments.org/
+license = BSD-2-Clause
+author = Georg Brandl
+author_email = georg@python.org
+description = Pygments is a syntax highlighting package written in Python.
+long_description = file: description.rst
+long_description_content_type = text/x-rst
+platforms = any
+keywords = syntax highlighting
+classifiers =
+ Development Status :: 6 - Mature
+ Intended Audience :: Developers
+ Intended Audience :: End Users/Desktop
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: BSD License
+ Operating System :: OS Independent
+ Programming Language :: Python
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
+ Programming Language :: Python :: Implementation :: CPython
+ Programming Language :: Python :: Implementation :: PyPy
+ Topic :: Text Processing :: Filters
+ Topic :: Utilities
+project_urls =
+ Documentation = https://pygments.org/docs/
+ Source = https://github.com/pygments/pygments
+ Bug Tracker = https://github.com/pygments/pygments/issues
+ Changelog = https://github.com/pygments/pygments/blob/master/CHANGES
+
+[options]
+packages = find:
+zip_safe = false
+include_package_data = true
+python_requires = >=3.6
+
+[options.packages.find]
+include =
+ pygments
+ pygments.*
+
+[options.entry_points]
+console_scripts =
+ pygmentize = pygments.cmdline:main
+
+[options.extras_require]
+plugins =
+ importlib-metadata;python_version<'3.8'
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..c823345
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+from setuptools import setup
+
+setup()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..3248d17
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,7 @@
+"""
+ Pygments test package
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..e3cb2c0
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,127 @@
+"""
+ Generated lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Checks that lexers output the expected tokens for each sample
+ under snippets/ and examplefiles/.
+
+ After making a change, rather than updating the samples manually,
+ run `pytest --update-goldens <changed file>`.
+
+    To add a new sample, create a new file under snippets/ or examplefiles/.
+ The directory must match the alias of the lexer to be used.
+ Populate only the input, then just `--update-goldens`.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pathlib import Path
+
+import pytest
+
+import pygments.lexers
+from pygments.token import Error
+
+
+def pytest_addoption(parser):
+ parser.addoption('--update-goldens', action='store_true',
+ help='reset golden master benchmarks')
+
+
+class LexerTestItem(pytest.Item):
+ def __init__(self, name, parent):
+ super().__init__(name, parent)
+ self.lexer = Path(str(self.fspath)).parent.name
+ self.actual = None
+
+ @classmethod
+ def _prettyprint_tokens(cls, tokens):
+ for tok, val in tokens:
+ if tok is Error and not cls.allow_errors:
+ raise ValueError('generated Error token at {!r}'.format(val))
+ yield '{!r:<13} {}'.format(val, str(tok)[6:])
+ if val.endswith('\n'):
+ yield ''
+
+ def runtest(self):
+ lexer = pygments.lexers.get_lexer_by_name(self.lexer)
+ tokens = lexer.get_tokens(self.input)
+ self.actual = '\n'.join(self._prettyprint_tokens(tokens)).rstrip('\n') + '\n'
+ if not self.config.getoption('--update-goldens'):
+ assert self.actual == self.expected
+
+ def _test_file_rel_path(self):
+ return Path(str(self.fspath)).relative_to(Path(__file__).parent.parent)
+
+ def _prunetraceback(self, excinfo):
+ excinfo.traceback = excinfo.traceback.cut(__file__).filter()
+
+ def repr_failure(self, excinfo):
+ if isinstance(excinfo.value, AssertionError):
+ rel_path = self._test_file_rel_path()
+ message = (
+ 'The tokens produced by the "{}" lexer differ from the '
+ 'expected ones in the file "{}".\n'
+ 'Run `pytest {} --update-goldens` to update it.'
+ ).format(self.lexer, rel_path, Path(*rel_path.parts[:2]))
+ diff = str(excinfo.value).split('\n', 1)[-1]
+ return message + '\n\n' + diff
+ else:
+ return pytest.Item.repr_failure(self, excinfo)
+
+ def reportinfo(self):
+ return self.fspath, None, str(self._test_file_rel_path())
+
+ def maybe_overwrite(self):
+ if self.actual is not None and self.config.getoption('--update-goldens'):
+ self.overwrite()
+
+
+class LexerSeparateTestItem(LexerTestItem):
+ allow_errors = False
+
+ def __init__(self, name, parent):
+ super().__init__(name, parent)
+
+ self.input = self.fspath.read_text('utf-8')
+ output_path = self.fspath + '.output'
+ if output_path.check():
+ self.expected = output_path.read_text(encoding='utf-8')
+ else:
+ self.expected = ''
+
+ def overwrite(self):
+ output_path = self.fspath + '.output'
+ output_path.write_text(self.actual, encoding='utf-8')
+
+
+class LexerInlineTestItem(LexerTestItem):
+ allow_errors = True
+
+ def __init__(self, name, parent):
+ super().__init__(name, parent)
+
+ content = self.fspath.read_text('utf-8')
+ content, _, self.expected = content.partition('\n---tokens---\n')
+ if content.startswith('---input---\n'):
+ content = '\n' + content
+ self.comment, _, self.input = content.rpartition('\n---input---\n')
+ if not self.input.endswith('\n'):
+ self.input += '\n'
+ self.comment = self.comment.strip()
+
+ def overwrite(self):
+ with self.fspath.open('w', encoding='utf-8') as f:
+ f.write(self.comment)
+ if self.comment:
+ f.write('\n\n')
+ f.write('---input---\n')
+ f.write(self.input)
+ f.write('\n---tokens---\n')
+ f.write(self.actual)
+
+
+def pytest_runtest_teardown(item, nextitem):
+ if isinstance(item, LexerTestItem):
+ item.maybe_overwrite()
diff --git a/tests/contrast/min_contrasts.json b/tests/contrast/min_contrasts.json
new file mode 100644
index 0000000..a69d73f
--- /dev/null
+++ b/tests/contrast/min_contrasts.json
@@ -0,0 +1,50 @@
+{
+ "default": 4.5,
+ "emacs": 2.4,
+ "friendly": 2.2,
+ "friendly_grayscale": 2.2,
+ "colorful": 2.2,
+ "autumn": 2.2,
+ "murphy": 1.4,
+ "manni": 1.4,
+ "material": 1.6,
+ "monokai": 2.3,
+ "perldoc": 2.7,
+ "pastie": 2.5,
+ "borland": 2.3,
+ "trac": 2.3,
+ "native": 3.0,
+ "fruity": 1.6,
+ "bw": 21.0,
+ "vim": 1.3,
+ "vs": 3.6,
+ "tango": 2.4,
+ "rrt": 5.3,
+ "xcode": 5.1,
+ "igor": 3.6,
+ "paraiso-light": 1.3,
+ "paraiso-dark": 1.3,
+ "lovelace": 3.5,
+ "algol": 3.5,
+ "algol_nu": 3.5,
+ "arduino": 2.6,
+ "rainbow_dash": 2.1,
+ "abap": 2.5,
+ "solarized-dark": 1.5,
+ "solarized-light": 1.0,
+ "sas": 4.6,
+ "staroffice": 4.5,
+ "stata": 2.4,
+ "stata-light": 2.4,
+ "stata-dark": 3.4,
+ "inkpot": 1.0,
+ "zenburn": 1.6,
+ "gruvbox-dark": 4.0,
+ "gruvbox-light": 3.2,
+ "dracula": 1.4,
+ "one-dark": 3.7,
+ "lilypond": 2.3,
+ "nord": 2.4,
+ "nord-darker": 2.8,
+ "github-dark": 4.8
+} \ No newline at end of file
diff --git a/tests/contrast/test_contrasts.py b/tests/contrast/test_contrasts.py
new file mode 100644
index 0000000..517b34e
--- /dev/null
+++ b/tests/contrast/test_contrasts.py
@@ -0,0 +1,101 @@
+"""
+ Contrast Tests
+    ~~~~~~~~~~~~~~
+
+ Pygments styles should be accessible to people with suboptimal vision.
+ This test ensures that the minimum contrast of styles does not degrade,
+ and that every rule of a new style fulfills the WCAG AA standard.[1]
+
+ [1]: https://www.w3.org/WAI/WCAG21/Understanding/contrast-minimum.html
+"""
+
+import json
+import os
+import sys
+
+import pygments.styles
+import pygments.token
+import wcag_contrast_ratio
+
+JSON_FILENAME = os.path.join(os.path.dirname(__file__), "min_contrasts.json")
+WCAG_AA_CONTRAST = 4.5
+
+
+def hex2rgb(hexstr):
+ hexstr = hexstr.lstrip("#")
+ r = int(hexstr[:2], 16) / 255
+ g = int(hexstr[2:4], 16) / 255
+ b = int(hexstr[4:], 16) / 255
+ return (r, g, b)
+
+
+def get_style_contrasts(style_cls):
+ return [
+ (
+ round(
+ wcag_contrast_ratio.rgb(
+ hex2rgb(style["bgcolor"] or style_cls.background_color),
+ hex2rgb(style["color"] or "#000000")
+ # we default to black because browsers also do
+ ),
+ 1,
+ ),
+ ttype,
+ )
+ for ttype, style in style_cls.list_styles()
+ if ttype != pygments.token.Whitespace
+ ]
+
+
+def builtin_styles():
+ for style_name in pygments.styles.STYLE_MAP:
+ yield (style_name, pygments.styles.get_style_by_name(style_name))
+
+
+def min_contrasts():
+ return {
+ name: min(x[0] for x in get_style_contrasts(style))
+ for name, style in builtin_styles()
+ }
+
+
+def update_json():
+ with open(JSON_FILENAME, "w") as f:
+ json.dump(
+ min_contrasts(),
+ f,
+ indent=2,
+ )
+
+
+def test_contrasts(fail_if_improved=True):
+ with open(JSON_FILENAME) as f:
+ previous_contrasts = json.load(f)
+
+ for style_name in pygments.styles.STYLE_MAP:
+ style = pygments.styles.get_style_by_name(style_name)
+ contrasts = get_style_contrasts(style)
+ min_contrast = min([x[0] for x in contrasts])
+
+ bar = previous_contrasts.get(style_name, WCAG_AA_CONTRAST)
+
+ assert not min_contrast < bar, (
+ "contrast degradation for style '{}'\n"
+ "The following rules have a contrast lower than the required {}:\n\n"
+ "{}\n"
+ ).format(
+ style_name,
+ bar,
+ "\n".join(
+ [
+ "* {:.2f} {}".format(contrast, ttype)
+ for contrast, ttype in contrasts
+ if contrast < bar
+ ]
+ ),
+ )
+
+ if fail_if_improved:
+ assert (
+ not min_contrast > bar
+ ), "congrats, you improved a contrast! please run ./scripts/update_contrasts.py"
diff --git a/tests/dtds/HTML4-f.dtd b/tests/dtds/HTML4-f.dtd
new file mode 100644
index 0000000..9552012
--- /dev/null
+++ b/tests/dtds/HTML4-f.dtd
@@ -0,0 +1,37 @@
+<!--
+ This is the HTML 4.0 Frameset DTD, which should be
+ used for documents with frames. This DTD is identical
+ to the HTML 4.0 Transitional DTD except for the
+ content model of the "HTML" element: in frameset
+ documents, the "FRAMESET" element replaces the "BODY"
+ element.
+
+ Draft: $Date: 1999/05/02 15:37:15 $
+
+ Authors:
+ Dave Raggett <dsr@w3.org>
+ Arnaud Le Hors <lehors@w3.org>
+ Ian Jacobs <ij@w3.org>
+
+ Further information about HTML 4.0 is available at:
+
+ http://www.w3.org/TR/REC-html40.
+-->
+<!ENTITY % HTML.Version "-//W3C//DTD HTML 4.0 Frameset//EN"
+ -- Typical usage:
+
+ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Frameset//EN"
+ "http://www.w3.org/TR/REC-html40/frameset.dtd">
+ <html>
+ <head>
+ ...
+ </head>
+ <frameset>
+ ...
+ </frameset>
+ </html>
+-->
+
+<!ENTITY % HTML.Frameset "INCLUDE">
+<!ENTITY % HTML4.dtd PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+%HTML4.dtd; \ No newline at end of file
diff --git a/tests/dtds/HTML4-s.dtd b/tests/dtds/HTML4-s.dtd
new file mode 100644
index 0000000..8ce7917
--- /dev/null
+++ b/tests/dtds/HTML4-s.dtd
@@ -0,0 +1,869 @@
+<!--
+ This is HTML 4.0 Strict DTD, which excludes the presentation
+ attributes and elements that W3C expects to phase out as
+ support for style sheets matures. Authors should use the Strict
+ DTD when possible, but may use the Transitional DTD when support
+ for presentation attribute and elements is required.
+
+ HTML 4.0 includes mechanisms for style sheets, scripting,
+ embedding objects, improved support for right to left and mixed
+ direction text, and enhancements to forms for improved
+ accessibility for people with disabilities.
+
+ Draft: $Date: 1999/05/02 15:37:15 $
+
+ Authors:
+ Dave Raggett <dsr@w3.org>
+ Arnaud Le Hors <lehors@w3.org>
+ Ian Jacobs <ij@w3.org>
+
+ Further information about HTML 4.0 is available at:
+
+ http://www.w3.org/TR/REC-html40
+-->
+<!--
+ Typical usage:
+
+ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN"
+ "http://www.w3.org/TR/REC-html40/strict.dtd">
+ <html>
+ <head>
+ ...
+ </head>
+ <body>
+ ...
+ </body>
+ </html>
+
+ The URI used as a system identifier with the public identifier allows
+ the user agent to download the DTD and entity sets as needed.
+
+ The FPI for the Transitional HTML 4.0 DTD is:
+
+ "-//W3C//DTD HTML 4.0 Transitional//EN
+
+ and its URI is:
+
+ http://www.w3.org/TR/REC-html40/loose.dtd
+
+ If you are writing a document that includes frames, use
+ the following FPI:
+
+ "-//W3C//DTD HTML 4.0 Frameset//EN"
+
+ with the URI:
+
+ http://www.w3.org/TR/REC-html40/frameset.dtd
+
+ The following URIs are supported in relation to HTML 4.0
+
+ "http://www.w3.org/TR/REC-html40/strict.dtd" (Strict DTD)
+ "http://www.w3.org/TR/REC-html40/loose.dtd" (Loose DTD)
+ "http://www.w3.org/TR/REC-html40/frameset.dtd" (Frameset DTD)
+ "http://www.w3.org/TR/REC-html40/HTMLlat1.ent" (Latin-1 entities)
+ "http://www.w3.org/TR/REC-html40/HTMLsymbol.ent" (Symbol entities)
+ "http://www.w3.org/TR/REC-html40/HTMLspecial.ent" (Special entities)
+
+ These URIs point to the latest version of each file. To reference
+ this specific revision use the following URIs:
+
+ "http://www.w3.org/TR/REC-html40-971218/strict.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/loose.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/frameset.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLlat1.ent"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLsymbol.ent"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLspecial.ent"
+
+-->
+
+<!--================== Imported Names ====================================-->
+
+<!ENTITY % ContentType "CDATA"
+ -- media type, as per [RFC2045]
+ -->
+
+<!ENTITY % ContentTypes "CDATA"
+ -- comma-separated list of media types, as per [RFC2045]
+ -->
+
+<!ENTITY % Charset "CDATA"
+ -- a character encoding, as per [RFC2045]
+ -->
+
+<!ENTITY % Charsets "CDATA"
+ -- a space separated list of character encodings, as per [RFC2045]
+ -->
+
+<!ENTITY % LanguageCode "NAME"
+ -- a language code, as per [RFC1766]
+ -->
+
+<!ENTITY % Character "CDATA"
+ -- a single character from [ISO10646]
+ -->
+
+<!ENTITY % LinkTypes "CDATA"
+ -- space-separated list of link types
+ -->
+
+<!ENTITY % MediaDesc "CDATA"
+ -- single or comma-separated list of media descriptors
+ -->
+
+<!ENTITY % URI "CDATA"
+ -- a Uniform Resource Identifier,
+ see [URI]
+ -->
+
+<!ENTITY % Datetime "CDATA" -- date and time information. ISO date format -->
+
+
+<!ENTITY % Script "CDATA" -- script expression -->
+
+<!ENTITY % StyleSheet "CDATA" -- style sheet data -->
+
+
+
+<!ENTITY % Text "CDATA">
+
+
+<!-- Parameter Entities -->
+
+<!ENTITY % head.misc "SCRIPT|STYLE|META|LINK|OBJECT" -- repeatable head elements -->
+
+<!ENTITY % heading "H1|H2|H3|H4|H5|H6">
+
+<!ENTITY % list "UL | OL">
+
+<!ENTITY % preformatted "PRE">
+
+
+<!--================ Character mnemonic entities =========================-->
+
+<!ENTITY % HTMLlat1 PUBLIC
+ "-//W3C//ENTITIES Latin1//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLlat1.ent">
+%HTMLlat1;
+
+<!ENTITY % HTMLsymbol PUBLIC
+ "-//W3C//ENTITIES Symbols//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLsymbol.ent">
+%HTMLsymbol;
+
+<!ENTITY % HTMLspecial PUBLIC
+ "-//W3C//ENTITIES Special//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLspecial.ent">
+%HTMLspecial;
+<!--=================== Generic Attributes ===============================-->
+
+<!ENTITY % coreattrs
+ "id ID #IMPLIED -- document-wide unique id --
+ class CDATA #IMPLIED -- space separated list of classes --
+ style %StyleSheet; #IMPLIED -- associated style info --
+ title %Text; #IMPLIED -- advisory title/amplification --"
+ >
+
+<!ENTITY % i18n
+ "lang %LanguageCode; #IMPLIED -- language code --
+ dir (ltr|rtl) #IMPLIED -- direction for weak/neutral text --"
+ >
+
+<!ENTITY % events
+ "onclick %Script; #IMPLIED -- a pointer button was clicked --
+ ondblclick %Script; #IMPLIED -- a pointer button was double clicked--
+ onmousedown %Script; #IMPLIED -- a pointer button was pressed down --
+ onmouseup %Script; #IMPLIED -- a pointer button was released --
+ onmouseover %Script; #IMPLIED -- a pointer was moved onto --
+ onmousemove %Script; #IMPLIED -- a pointer was moved within --
+ onmouseout %Script; #IMPLIED -- a pointer was moved away --
+ onkeypress %Script; #IMPLIED -- a key was pressed and released --
+ onkeydown %Script; #IMPLIED -- a key was pressed down --
+ onkeyup %Script; #IMPLIED -- a key was released --"
+ >
+
+<!-- Reserved Feature Switch -->
+<!ENTITY % HTML.Reserved "IGNORE">
+
+<!-- The following attributes are reserved for possible future use -->
+<![ %HTML.Reserved; [
+<!ENTITY % reserved
+ "datasrc %URI; #IMPLIED -- a single or tabular Data Source --
+ datafld CDATA #IMPLIED -- the property or column name --
+ dataformatas (plaintext|html) plaintext -- text or html --"
+ >
+]]>
+
+<!ENTITY % reserved "">
+
+<!ENTITY % attrs "%coreattrs; %i18n; %events;">
+
+
+<!--=================== Text Markup ======================================-->
+
+<!ENTITY % fontstyle
+ "TT | I | B | BIG | SMALL">
+
+<!ENTITY % phrase "EM | STRONG | DFN | CODE |
+ SAMP | KBD | VAR | CITE | ABBR | ACRONYM" >
+
+<!ENTITY % special
+ "A | IMG | OBJECT | BR | SCRIPT | MAP | Q | SUB | SUP | SPAN | BDO">
+
+<!ENTITY % formctrl "INPUT | SELECT | TEXTAREA | LABEL | BUTTON">
+
+<!-- %inline; covers inline or "text-level" elements -->
+<!ENTITY % inline "#PCDATA | %fontstyle; | %phrase; | %special; | %formctrl;">
+
+<!ELEMENT (%fontstyle;|%phrase;) - - (%inline;)*>
+<!ATTLIST (%fontstyle;|%phrase;)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT (SUB|SUP) - - (%inline;)* -- subscript, superscript -->
+<!ATTLIST (SUB|SUP)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT SPAN - - (%inline;)* -- generic language/style container -->
+<!ATTLIST SPAN
+ %attrs; -- %coreattrs, %i18n, %events --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT BDO - - (%inline;)* -- I18N BiDi over-ride -->
+<!ATTLIST BDO
+ %coreattrs; -- id, class, style, title --
+ lang %LanguageCode; #IMPLIED -- language code --
+ dir (ltr|rtl) #REQUIRED -- directionality --
+ >
+
+
+<!ELEMENT BR - O EMPTY -- forced line break -->
+<!ATTLIST BR
+ %coreattrs; -- id, class, style, title --
+ >
+
+<!--================== HTML content models ===============================-->
+
+<!--
+ HTML has two basic content models:
+
+ %inline; character level elements and text strings
+ %block; block-like elements e.g. paragraphs and lists
+-->
+
+<!ENTITY % block
+ "P | %heading; | %list; | %preformatted; | DL | DIV | NOSCRIPT |
+ BLOCKQUOTE | FORM | HR | TABLE | FIELDSET | ADDRESS">
+
+<!ENTITY % flow "%block; | %inline;">
+
+<!--=================== Document Body ====================================-->
+
+<!ELEMENT BODY O O (%block;|SCRIPT)+ +(INS|DEL) -- document body -->
+<!ATTLIST BODY
+ %attrs; -- %coreattrs, %i18n, %events --
+ onload %Script; #IMPLIED -- the document has been loaded --
+ onunload %Script; #IMPLIED -- the document has been removed --
+ >
+
+<!ELEMENT ADDRESS - - (%inline;)* -- information on author -->
+<!ATTLIST ADDRESS
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT DIV - - (%flow;)* -- generic language/style container -->
+<!ATTLIST DIV
+ %attrs; -- %coreattrs, %i18n, %events --
+ %reserved; -- reserved for possible future use --
+ >
+
+
+<!--================== The Anchor Element ================================-->
+
+<!ENTITY % Shape "(rect|circle|poly|default)">
+<!ENTITY % Coords "CDATA" -- comma separated list of lengths -->
+
+<!ELEMENT A - - (%inline;)* -(A) -- anchor -->
+<!ATTLIST A
+ %attrs; -- %coreattrs, %i18n, %events --
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ type %ContentType; #IMPLIED -- advisory content type --
+ name CDATA #IMPLIED -- named link end --
+ href %URI; #IMPLIED -- URI for linked resource --
+ hreflang %LanguageCode; #IMPLIED -- language code --
+ rel %LinkTypes; #IMPLIED -- forward link types --
+ rev %LinkTypes; #IMPLIED -- reverse link types --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ shape %Shape; rect -- for use with client-side image maps --
+ coords %Coords; #IMPLIED -- for use with client-side image maps --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!--================== Client-side image maps ============================-->
+
+<!-- These can be placed in the same document or grouped in a
+ separate document although this isn't yet widely supported -->
+
+<!ELEMENT MAP - - ((%block;)+ | AREA+) -- client-side image map -->
+<!ATTLIST MAP
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #REQUIRED -- for reference by usemap --
+ >
+
+<!ELEMENT AREA - O EMPTY -- client-side image map area -->
+<!ATTLIST AREA
+ %attrs; -- %coreattrs, %i18n, %events --
+ shape %Shape; rect -- controls interpretation of coords --
+ coords %Coords; #IMPLIED -- comma separated list of lengths --
+ href %URI; #IMPLIED -- URI for linked resource --
+ nohref (nohref) #IMPLIED -- this region has no action --
+ alt %Text; #REQUIRED -- short description --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!--================== The LINK Element ==================================-->
+
+<!--
+ Relationship values can be used in principle:
+
+ a) for document specific toolbars/menus when used
+ with the LINK element in document head e.g.
+ start, contents, previous, next, index, end, help
+ b) to link to a separate style sheet (rel=stylesheet)
+ c) to make a link to a script (rel=script)
+ d) by stylesheets to control how collections of
+ html nodes are rendered into printed documents
+ e) to make a link to a printable version of this document
+ e.g. a postscript or pdf version (rel=alternate media=print)
+-->
+
+<!ELEMENT LINK - O EMPTY -- a media-independent link -->
+<!ATTLIST LINK
+ %attrs; -- %coreattrs, %i18n, %events --
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ href %URI; #IMPLIED -- URI for linked resource --
+ hreflang %LanguageCode; #IMPLIED -- language code --
+ type %ContentType; #IMPLIED -- advisory content type --
+ rel %LinkTypes; #IMPLIED -- forward link types --
+ rev %LinkTypes; #IMPLIED -- reverse link types --
+ media %MediaDesc; #IMPLIED -- for rendering on these media --
+ >
+
+<!--=================== Images ===========================================-->
+
+<!-- Length defined in strict DTD for cellpadding/cellspacing -->
+<!ENTITY % Length "CDATA" -- nn for pixels or nn% for percentage length -->
+<!ENTITY % MultiLength "CDATA" -- pixel, percentage, or relative -->
+
+<!ENTITY % MultiLengths "CDATA" -- comma-separated list of MultiLength -->
+
+<!ENTITY % Pixels "CDATA" -- integer representing length in pixels -->
+
+
+<!-- To avoid problems with text-only UAs as well as
+ to make image content understandable and navigable
+ to users of non-visual UAs, you need to provide
+ a description with ALT, and avoid server-side image maps -->
+<!ELEMENT IMG - O EMPTY -- Embedded image -->
+<!ATTLIST IMG
+ %attrs; -- %coreattrs, %i18n, %events --
+ src %URI; #REQUIRED -- URI of image to embed --
+ alt %Text; #REQUIRED -- short description --
+ longdesc %URI; #IMPLIED -- link to long description
+ (complements alt) --
+ height %Length; #IMPLIED -- override height --
+ width %Length; #IMPLIED -- override width --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ ismap (ismap) #IMPLIED -- use server-side image map --
+ >
+
+<!-- USEMAP points to a MAP element which may be in this document
+ or an external document, although the latter is not widely supported -->
+
+<!--==================== OBJECT ======================================-->
+<!--
+ OBJECT is used to embed objects as part of HTML pages
+ PARAM elements should precede other content. SGML mixed content
+ model technicality precludes specifying this formally ...
+-->
+
+<!ELEMENT OBJECT - - (PARAM | %flow;)*
+ -- generic embedded object -->
+<!ATTLIST OBJECT
+ %attrs; -- %coreattrs, %i18n, %events --
+ declare (declare) #IMPLIED -- declare but don't instantiate flag --
+ classid %URI; #IMPLIED -- identifies an implementation --
+ codebase %URI; #IMPLIED -- base URI for classid, data, archive--
+ data %URI; #IMPLIED -- reference to object's data --
+ type %ContentType; #IMPLIED -- content type for data --
+ codetype %ContentType; #IMPLIED -- content type for code --
+ archive %URI; #IMPLIED -- space separated archive list --
+ standby %Text; #IMPLIED -- message to show while loading --
+ height %Length; #IMPLIED -- override height --
+ width %Length; #IMPLIED -- override width --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ name CDATA #IMPLIED -- submit as part of form --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT PARAM - O EMPTY -- named property value -->
+<!ATTLIST PARAM
+ id ID #IMPLIED -- document-wide unique id --
+ name CDATA #REQUIRED -- property name --
+ value CDATA #IMPLIED -- property value --
+ valuetype (DATA|REF|OBJECT) DATA -- How to interpret value --
+ type %ContentType; #IMPLIED -- content type for value
+ when valuetype=ref --
+ >
+
+
+<!--=================== Horizontal Rule ==================================-->
+
+<!ELEMENT HR - O EMPTY -- horizontal rule -->
+<!ATTLIST HR
+ %coreattrs; -- id, class, style, title --
+ %events;
+ >
+
+<!--=================== Paragraphs =======================================-->
+
+<!ELEMENT P - O (%inline;)* -- paragraph -->
+<!ATTLIST P
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--=================== Headings =========================================-->
+
+<!--
+ There are six levels of headings from H1 (the most important)
+ to H6 (the least important).
+-->
+
+<!ELEMENT (%heading;) - - (%inline;)* -- heading -->
+<!ATTLIST (%heading;)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--=================== Preformatted Text ================================-->
+
+<!-- excludes markup for images and changes in font size -->
+<!ENTITY % pre.exclusion "IMG|OBJECT|BIG|SMALL|SUB|SUP">
+
+<!ELEMENT PRE - - (%inline;)* -(%pre.exclusion;) -- preformatted text -->
+<!ATTLIST PRE
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--===================== Inline Quotes ==================================-->
+
+<!ELEMENT Q - - (%inline;)* -- short inline quotation -->
+<!ATTLIST Q
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- URI for source document or msg --
+ >
+
+<!--=================== Block-like Quotes ================================-->
+
+<!ELEMENT BLOCKQUOTE - - (%block;|SCRIPT)+ -- long quotation -->
+<!ATTLIST BLOCKQUOTE
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- URI for source document or msg --
+ >
+
+<!--=================== Inserted/Deleted Text ============================-->
+
+
+<!-- INS/DEL are handled by inclusion on BODY -->
+<!ELEMENT (INS|DEL) - - (%flow;)* -- inserted text, deleted text -->
+<!ATTLIST (INS|DEL)
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- info on reason for change --
+ datetime %Datetime; #IMPLIED -- date and time of change --
+ >
+
+<!--=================== Lists ============================================-->
+
+<!-- definition lists - DT for term, DD for its definition -->
+
+<!ELEMENT DL - - (DT|DD)+ -- definition list -->
+<!ATTLIST DL
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT DT - O (%inline;)* -- definition term -->
+<!ELEMENT DD - O (%flow;)* -- definition description -->
+<!ATTLIST (DT|DD)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+
+<!ELEMENT OL - - (LI)+ -- ordered list -->
+<!ATTLIST OL
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!-- Unordered Lists (UL) bullet styles -->
+<!ELEMENT UL - - (LI)+ -- unordered list -->
+<!ATTLIST UL
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+
+
+<!ELEMENT LI - O (%flow;)* -- list item -->
+<!ATTLIST LI
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--================ Forms ===============================================-->
+<!ELEMENT FORM - - (%block;|SCRIPT)+ -(FORM) -- interactive form -->
+<!ATTLIST FORM
+ %attrs; -- %coreattrs, %i18n, %events --
+ action %URI; #REQUIRED -- server-side form handler --
+ method (GET|POST) GET -- HTTP method used to submit the form--
+ enctype %ContentType; "application/x-www-form-urlencoded"
+ onsubmit %Script; #IMPLIED -- the form was submitted --
+ onreset %Script; #IMPLIED -- the form was reset --
+ accept-charset %Charsets; #IMPLIED -- list of supported charsets --
+ >
+
+<!-- Each label must not contain more than ONE field -->
+<!ELEMENT LABEL - - (%inline;)* -(LABEL) -- form field label text -->
+<!ATTLIST LABEL
+ %attrs; -- %coreattrs, %i18n, %events --
+ for IDREF #IMPLIED -- matches field ID value --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!ENTITY % InputType
+ "(TEXT | PASSWORD | CHECKBOX |
+ RADIO | SUBMIT | RESET |
+ FILE | HIDDEN | IMAGE | BUTTON)"
+ >
+
+<!-- attribute name required for all but submit & reset -->
+<!ELEMENT INPUT - O EMPTY -- form control -->
+<!ATTLIST INPUT
+ %attrs; -- %coreattrs, %i18n, %events --
+ type %InputType; TEXT -- what kind of widget is needed --
+ name CDATA #IMPLIED -- submit as part of form --
+ value CDATA #IMPLIED -- required for radio and checkboxes --
+ checked (checked) #IMPLIED -- for radio buttons and check boxes --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ readonly (readonly) #IMPLIED -- for text and passwd --
+ size CDATA #IMPLIED -- specific to each type of field --
+ maxlength NUMBER #IMPLIED -- max chars for text fields --
+ src %URI; #IMPLIED -- for fields with images --
+ alt CDATA #IMPLIED -- short description --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onselect %Script; #IMPLIED -- some text was selected --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ accept %ContentTypes; #IMPLIED -- list of MIME types for file upload --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT SELECT - - (OPTGROUP|OPTION)+ -- option selector -->
+<!ATTLIST SELECT
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED -- field name --
+ size NUMBER #IMPLIED -- rows visible --
+ multiple (multiple) #IMPLIED -- default is single selection --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT OPTGROUP - - (OPTION)+ -- option group -->
+<!ATTLIST OPTGROUP
+ %attrs; -- %coreattrs, %i18n, %events --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ label %Text; #REQUIRED -- for use in hierarchical menus --
+ >
+
+<!ELEMENT OPTION - O (#PCDATA) -- selectable choice -->
+<!ATTLIST OPTION
+ %attrs; -- %coreattrs, %i18n, %events --
+ selected (selected) #IMPLIED
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ label %Text; #IMPLIED -- for use in hierarchical menus --
+ value CDATA #IMPLIED -- defaults to element content --
+ >
+
+<!ELEMENT TEXTAREA - - (#PCDATA) -- multi-line text field -->
+<!ATTLIST TEXTAREA
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED
+ rows NUMBER #REQUIRED
+ cols NUMBER #REQUIRED
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ readonly (readonly) #IMPLIED
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onselect %Script; #IMPLIED -- some text was selected --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!--
+ #PCDATA is to solve the mixed content problem,
+ per specification only whitespace is allowed there!
+ -->
+<!ELEMENT FIELDSET - - (#PCDATA,LEGEND,(%flow;)*) -- form control group -->
+<!ATTLIST FIELDSET
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT LEGEND - - (%inline;)* -- fieldset legend -->
+<!ENTITY % LAlign "(top|bottom|left|right)">
+
+<!ATTLIST LEGEND
+ %attrs; -- %coreattrs, %i18n, %events --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ >
+
+<!ELEMENT BUTTON - -
+ (%flow;)* -(A|%formctrl;|FORM|FIELDSET)
+ -- push button -->
+<!ATTLIST BUTTON
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED
+ value CDATA #IMPLIED -- sent to server when submitted --
+ type (button|submit|reset) submit -- for use as form button --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!--======================= Tables =======================================-->
+
+<!-- IETF HTML table standard, see [RFC1942] -->
+
+<!--
+ The BORDER attribute sets the thickness of the frame around the
+ table. The default units are screen pixels.
+
+ The FRAME attribute specifies which parts of the frame around
+ the table should be rendered. The values are not the same as
+ CALS to avoid a name clash with the VALIGN attribute.
+
+ The value "border" is included for backwards compatibility with
+ <TABLE BORDER> which yields frame=border and border=implied
+ For <TABLE BORDER=1> you get border=1 and frame=implied. In this
+ case, it is appropriate to treat this as frame=border for backwards
+ compatibility with deployed browsers.
+-->
+<!ENTITY % TFrame "(void|above|below|hsides|lhs|rhs|vsides|box|border)">
+
+<!--
+ The RULES attribute defines which rules to draw between cells:
+
+ If RULES is absent then assume:
+ "none" if BORDER is absent or BORDER=0 otherwise "all"
+-->
+
+<!ENTITY % TRules "(none | groups | rows | cols | all)">
+
+<!-- horizontal placement of table relative to document -->
+<!ENTITY % TAlign "(left|center|right)">
+
+<!-- horizontal alignment attributes for cell contents -->
+<!ENTITY % cellhalign
+ "align (left|center|right|justify|char) #IMPLIED
+ char %Character; #IMPLIED -- alignment char, e.g. char=':' --
+ charoff %Length; #IMPLIED -- offset for alignment char --"
+ >
+
+<!-- vertical alignment attributes for cell contents -->
+<!ENTITY % cellvalign
+ "valign (top|middle|bottom|baseline) #IMPLIED"
+ >
+
+<!ELEMENT TABLE - -
+ (CAPTION?, (COL*|COLGROUP*), THEAD?, TFOOT?, TBODY+)>
+<!ELEMENT CAPTION - - (%inline;)* -- table caption -->
+<!ELEMENT THEAD - O (TR)+ -- table header -->
+<!ELEMENT TFOOT - O (TR)+ -- table footer -->
+<!ELEMENT TBODY O O (TR)+ -- table body -->
+<!ELEMENT COLGROUP - O (col)* -- table column group -->
+<!ELEMENT COL - O EMPTY -- table column -->
+<!ELEMENT TR - O (TH|TD)+ -- table row -->
+<!ELEMENT (TH|TD) - O (%flow;)* -- table header cell, table data cell-->
+
+<!ATTLIST TABLE -- table element --
+ %attrs; -- %coreattrs, %i18n, %events --
+ summary %Text; #IMPLIED -- purpose/structure for speech output--
+ width %Length; #IMPLIED -- table width --
+ border %Pixels; #IMPLIED -- controls frame width around table --
+ frame %TFrame; #IMPLIED -- which parts of frame to render --
+ rules %TRules; #IMPLIED -- rulings between rows and cols --
+ cellspacing %Length; #IMPLIED -- spacing between cells --
+ cellpadding %Length; #IMPLIED -- spacing within cells --
+ %reserved; -- reserved for possible future use --
+ datapagesize CDATA #IMPLIED -- reserved for possible future use --
+ >
+
+<!ENTITY % CAlign "(top|bottom|left|right)">
+
+<!ATTLIST CAPTION
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--
+COLGROUP groups a set of COL elements. It allows you to group
+several semantically related columns together.
+-->
+<!ATTLIST COLGROUP
+ %attrs; -- %coreattrs, %i18n, %events --
+ span NUMBER 1 -- default number of columns in group --
+ width %MultiLength; #IMPLIED -- default width for enclosed COLs --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!--
+ COL elements define the alignment properties for cells in
+ one or more columns.
+
+ The WIDTH attribute specifies the width of the columns, e.g.
+
+ width=64 width in screen pixels
+ width=0.5* relative width of 0.5
+
+ The SPAN attribute causes the attributes of one
+ COL element to apply to more than one column.
+-->
+<!ATTLIST COL -- column groups and properties --
+ %attrs; -- %coreattrs, %i18n, %events --
+ span NUMBER 1 -- COL attributes affect N columns --
+ width %MultiLength; #IMPLIED -- column width specification --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!--
+ Use THEAD to duplicate headers when breaking table
+ across page boundaries, or for static headers when
+ TBODY sections are rendered in scrolling panel.
+
+ Use TFOOT to duplicate footers when breaking table
+ across page boundaries, or for static footers when
+ TBODY sections are rendered in scrolling panel.
+
+ Use multiple TBODY sections when rules are needed
+ between groups of table rows.
+-->
+<!ATTLIST (THEAD|TBODY|TFOOT) -- table section --
+ %attrs; -- %coreattrs, %i18n, %events --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!ATTLIST TR -- table row --
+ %attrs; -- %coreattrs, %i18n, %events --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+
+<!-- Scope is simpler than axes attribute for common tables -->
+<!ENTITY % Scope "(row|col|rowgroup|colgroup)">
+
+<!-- TH is for headers, TD for data, but for cells acting as both use TD -->
+<!ATTLIST (TH|TD) -- header or data cell --
+ %attrs; -- %coreattrs, %i18n, %events --
+ abbr %Text; #IMPLIED -- abbreviation for header cell --
+ axis CDATA #IMPLIED -- names groups of related headers--
+ headers IDREFS #IMPLIED -- list of id's for header cells --
+ scope %Scope; #IMPLIED -- scope covered by header cells --
+ rowspan NUMBER 1 -- number of rows spanned by cell --
+ colspan NUMBER 1 -- number of cols spanned by cell --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+
+<!--================ Document Head =======================================-->
+<!-- %head.misc; defined earlier on as "SCRIPT|STYLE|META|LINK|OBJECT" -->
+<!ENTITY % head.content "TITLE & BASE?">
+
+<!ELEMENT HEAD O O (%head.content;) +(%head.misc;) -- document head -->
+<!ATTLIST HEAD
+ %i18n; -- lang, dir --
+ profile %URI; #IMPLIED -- named dictionary of meta info --
+ >
+
+<!-- The TITLE element is not considered part of the flow of text.
+ It should be displayed, for example as the page header or
+ window title. Exactly one title is required per document.
+ -->
+<!ELEMENT TITLE - - (#PCDATA) -(%head.misc;) -- document title -->
+<!ATTLIST TITLE %i18n>
+
+
+<!ELEMENT BASE - O EMPTY -- document base URI -->
+<!ATTLIST BASE
+ href %URI; #REQUIRED -- URI that acts as base URI --
+ >
+
+<!ELEMENT META - O EMPTY -- generic metainformation -->
+<!ATTLIST META
+ %i18n; -- lang, dir, for use with content --
+ http-equiv NAME #IMPLIED -- HTTP response header name --
+ name NAME #IMPLIED -- metainformation name --
+ content CDATA #REQUIRED -- associated information --
+ scheme CDATA #IMPLIED -- select form of content --
+ >
+
+<!ELEMENT STYLE - - %StyleSheet -- style info -->
+<!ATTLIST STYLE
+ %i18n; -- lang, dir, for use with title --
+ type %ContentType; #REQUIRED -- content type of style language --
+ media %MediaDesc; #IMPLIED -- designed for use with these media --
+ title %Text; #IMPLIED -- advisory title --
+ >
+
+<!ELEMENT SCRIPT - - %Script; -- script statements -->
+<!ATTLIST SCRIPT
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ type %ContentType; #REQUIRED -- content type of script language --
+ language CDATA #IMPLIED -- predefined script language name --
+ src %URI; #IMPLIED -- URI for an external script --
+ defer (defer) #IMPLIED -- UA may defer execution of script --
+ event CDATA #IMPLIED -- reserved for possible future use --
+ for %URI; #IMPLIED -- reserved for possible future use --
+ >
+
+<!ELEMENT NOSCRIPT - - (%block;)+
+ -- alternate content container for non script-based rendering -->
+<!ATTLIST NOSCRIPT
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--================ Document Structure ==================================-->
+<!ENTITY % html.content "HEAD, BODY">
+
+<!ELEMENT HTML O O (%html.content;) -- document root element -->
+<!ATTLIST HTML
+ %i18n; -- lang, dir --
+ >
diff --git a/tests/dtds/HTML4.dcl b/tests/dtds/HTML4.dcl
new file mode 100644
index 0000000..db46db0
--- /dev/null
+++ b/tests/dtds/HTML4.dcl
@@ -0,0 +1,88 @@
+<!SGML "ISO 8879:1986 (WWW)"
+ --
+ SGML Declaration for HyperText Markup Language version 4.0
+
+ With support for the first 17 planes of ISO 10646 and
+ increased limits for tag and literal lengths etc.
+
+ Modified by jjc to work around SP's 16-bit character limit.
+ Modified by jjc to support hex character references.
+ --
+
+ CHARSET
+ BASESET "ISO Registration Number 177//CHARSET
+ ISO/IEC 10646-1:1993 UCS-4 with
+ implementation level 3//ESC 2/5 2/15 4/6"
+ DESCSET 0 9 UNUSED
+ 9 2 9
+ 11 2 UNUSED
+ 13 1 13
+ 14 18 UNUSED
+ 32 95 32
+ 127 1 UNUSED
+ 128 32 UNUSED
+ -- jjc: changed the rest of the DESCSET.
+ Note that surrogates are not declared UNUSED;
+ this allows non-BMP characters to be parsed. --
+ 160 65376 160
+ -- 160 55136 160
+ 55296 2048 UNUSED
+ 57344 1056768 57344 --
+
+CAPACITY SGMLREF
+ TOTALCAP 150000
+ GRPCAP 150000
+ ENTCAP 150000
+
+SCOPE DOCUMENT
+SYNTAX
+ SHUNCHAR CONTROLS 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 127
+ BASESET "ISO 646IRV:1991//CHARSET
+ International Reference Version
+ (IRV)//ESC 2/8 4/2"
+ DESCSET 0 128 0
+
+ FUNCTION
+ RE 13
+ RS 10
+ SPACE 32
+ TAB SEPCHAR 9
+
+ NAMING LCNMSTRT ""
+ UCNMSTRT ""
+ LCNMCHAR ".-_:"
+ UCNMCHAR ".-_:"
+ NAMECASE GENERAL YES
+ ENTITY NO
+ DELIM GENERAL SGMLREF
+ HCRO "&#38;#X" -- added by jjc --
+ SHORTREF SGMLREF
+ NAMES SGMLREF
+ QUANTITY SGMLREF
+ ATTCNT 60 -- increased --
+ ATTSPLEN 65536 -- These are the largest values --
+ LITLEN 65536 -- permitted in the declaration --
+ NAMELEN 65536 -- Avoid fixed limits in actual --
+ PILEN 65536 -- implementations of HTML UA's --
+ TAGLVL 100
+ TAGLEN 65536
+ GRPGTCNT 150
+ GRPCNT 64
+
+FEATURES
+ MINIMIZE
+ DATATAG NO
+ OMITTAG YES
+ RANK NO
+ SHORTTAG YES
+ LINK
+ SIMPLE NO
+ IMPLICIT NO
+ EXPLICIT NO
+ OTHER
+ CONCUR NO
+ SUBDOC NO
+ FORMAL YES
+ APPINFO NONE
+> \ No newline at end of file
diff --git a/tests/dtds/HTML4.dtd b/tests/dtds/HTML4.dtd
new file mode 100644
index 0000000..9e781db
--- /dev/null
+++ b/tests/dtds/HTML4.dtd
@@ -0,0 +1,1092 @@
+<!--
+ This is the HTML 4.0 Transitional DTD, which includes
+ presentation attributes and elements that W3C expects to phase out
+ as support for style sheets matures. Authors should use the Strict
+ DTD when possible, but may use the Transitional DTD when support
+ for presentation attribute and elements is required.
+
+ HTML 4.0 includes mechanisms for style sheets, scripting,
+ embedding objects, improved support for right to left and mixed
+ direction text, and enhancements to forms for improved
+ accessibility for people with disabilities.
+
+ Draft: $Date: 1999/05/02 15:37:15 $
+
+ Authors:
+ Dave Raggett <dsr@w3.org>
+ Arnaud Le Hors <lehors@w3.org>
+ Ian Jacobs <ij@w3.org>
+
+ Further information about HTML 4.0 is available at:
+
+ http://www.w3.org/TR/REC-html40
+-->
+<!ENTITY % HTML.Version "-//W3C//DTD HTML 4.0 Transitional//EN"
+ -- Typical usage:
+
+ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
+ "http://www.w3.org/TR/REC-html40/loose.dtd">
+ <html>
+ <head>
+ ...
+ </head>
+ <body>
+ ...
+ </body>
+ </html>
+
+ The URI used as a system identifier with the public identifier allows
+ the user agent to download the DTD and entity sets as needed.
+
+ The FPI for the Strict HTML 4.0 DTD is:
+
+ "-//W3C//DTD HTML 4.0//EN"
+
+ and its URI is:
+
+ http://www.w3.org/TR/REC-html40/strict.dtd
+
+ Authors should use the Strict DTD unless they need the
+ presentation control for user agents that don't (adequately)
+ support style sheets.
+
+ If you are writing a document that includes frames, use
+ the following FPI:
+
+ "-//W3C//DTD HTML 4.0 Frameset//EN"
+
+ with the URI:
+
+ http://www.w3.org/TR/REC-html40/frameset.dtd
+
+ The following URIs are supported in relation to HTML 4.0
+
+ "http://www.w3.org/TR/REC-html40/strict.dtd" (Strict DTD)
+ "http://www.w3.org/TR/REC-html40/loose.dtd" (Loose DTD)
+ "http://www.w3.org/TR/REC-html40/frameset.dtd" (Frameset DTD)
+ "http://www.w3.org/TR/REC-html40/HTMLlat1.ent" (Latin-1 entities)
+ "http://www.w3.org/TR/REC-html40/HTMLsymbol.ent" (Symbol entities)
+ "http://www.w3.org/TR/REC-html40/HTMLspecial.ent" (Special entities)
+
+ These URIs point to the latest version of each file. To reference
+ this specific revision use the following URIs:
+
+ "http://www.w3.org/TR/REC-html40-971218/strict.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/loose.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/frameset.dtd"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLlat1.ent"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLsymbol.ent"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLspecial.ent"
+
+-->
+
+<!--================== Imported Names ====================================-->
+
+<!ENTITY % ContentType "CDATA"
+ -- media type, as per [RFC2045]
+ -->
+
+<!ENTITY % ContentTypes "CDATA"
+ -- comma-separated list of media types, as per [RFC2045]
+ -->
+
+<!ENTITY % Charset "CDATA"
+ -- a character encoding, as per [RFC2045]
+ -->
+
+<!ENTITY % Charsets "CDATA"
+ -- a space separated list of character encodings, as per [RFC2045]
+ -->
+
+<!ENTITY % LanguageCode "NAME"
+ -- a language code, as per [RFC1766]
+ -->
+
+<!ENTITY % Character "CDATA"
+ -- a single character from [ISO10646]
+ -->
+
+<!ENTITY % LinkTypes "CDATA"
+ -- space-separated list of link types
+ -->
+
+<!ENTITY % MediaDesc "CDATA"
+ -- single or comma-separated list of media descriptors
+ -->
+
+<!ENTITY % URI "CDATA"
+ -- a Uniform Resource Identifier,
+ see [URI]
+ -->
+
+<!ENTITY % Datetime "CDATA" -- date and time information. ISO date format -->
+
+
+<!ENTITY % Script "CDATA" -- script expression -->
+
+<!ENTITY % StyleSheet "CDATA" -- style sheet data -->
+
+<!ENTITY % FrameTarget "CDATA" -- render in this frame -->
+
+
+<!ENTITY % Text "CDATA">
+
+
+<!-- Parameter Entities -->
+
+<!ENTITY % head.misc "SCRIPT|STYLE|META|LINK|OBJECT" -- repeatable head elements -->
+
+<!ENTITY % heading "H1|H2|H3|H4|H5|H6">
+
+<!ENTITY % list "UL | OL | DIR | MENU">
+
+<!ENTITY % preformatted "PRE">
+
+<!ENTITY % Color "CDATA" -- a color using sRGB: #RRGGBB as Hex values -->
+
+<!-- There are also 16 widely known color names with their sRGB values:
+
+ Black = #000000 Green = #008000
+ Silver = #C0C0C0 Lime = #00FF00
+ Gray = #808080 Olive = #808000
+ White = #FFFFFF Yellow = #FFFF00
+ Maroon = #800000 Navy = #000080
+ Red = #FF0000 Blue = #0000FF
+ Purple = #800080 Teal = #008080
+ Fuchsia= #FF00FF Aqua = #00FFFF
+ -->
+
+<!ENTITY % bodycolors "
+ bgcolor %Color; #IMPLIED -- document background color --
+ text %Color; #IMPLIED -- document text color --
+ link %Color; #IMPLIED -- color of links --
+ vlink %Color; #IMPLIED -- color of visited links --
+ alink %Color; #IMPLIED -- color of selected links --
+ ">
+
+<!--================ Character mnemonic entities =========================-->
+
+<!ENTITY % HTMLlat1 PUBLIC
+ "-//W3C//ENTITIES Latin1//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLlat1.ent">
+%HTMLlat1;
+
+<!ENTITY % HTMLsymbol PUBLIC
+ "-//W3C//ENTITIES Symbols//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLsymbol.ent">
+%HTMLsymbol;
+
+<!ENTITY % HTMLspecial PUBLIC
+ "-//W3C//ENTITIES Special//EN//HTML"
+ "http://www.w3.org/TR/REC-html40-971218/HTMLspecial.ent">
+%HTMLspecial;
+<!--=================== Generic Attributes ===============================-->
+
+<!ENTITY % coreattrs
+ "id ID #IMPLIED -- document-wide unique id --
+ class CDATA #IMPLIED -- space separated list of classes --
+ style %StyleSheet; #IMPLIED -- associated style info --
+ title %Text; #IMPLIED -- advisory title/amplification --"
+ >
+
+<!ENTITY % i18n
+ "lang %LanguageCode; #IMPLIED -- language code --
+ dir (ltr|rtl) #IMPLIED -- direction for weak/neutral text --"
+ >
+
+<!ENTITY % events
+ "onclick %Script; #IMPLIED -- a pointer button was clicked --
+ ondblclick %Script; #IMPLIED -- a pointer button was double clicked--
+ onmousedown %Script; #IMPLIED -- a pointer button was pressed down --
+ onmouseup %Script; #IMPLIED -- a pointer button was released --
+ onmouseover %Script; #IMPLIED -- a pointer was moved onto --
+ onmousemove %Script; #IMPLIED -- a pointer was moved within --
+ onmouseout %Script; #IMPLIED -- a pointer was moved away --
+ onkeypress %Script; #IMPLIED -- a key was pressed and released --
+ onkeydown %Script; #IMPLIED -- a key was pressed down --
+ onkeyup %Script; #IMPLIED -- a key was released --"
+ >
+
+<!-- Reserved Feature Switch -->
+<!ENTITY % HTML.Reserved "IGNORE">
+
+<!-- The following attributes are reserved for possible future use -->
+<![ %HTML.Reserved; [
+<!ENTITY % reserved
+ "datasrc %URI; #IMPLIED -- a single or tabular Data Source --
+ datafld CDATA #IMPLIED -- the property or column name --
+ dataformatas (plaintext|html) plaintext -- text or html --"
+ >
+]]>
+
+<!ENTITY % reserved "">
+
+<!ENTITY % attrs "%coreattrs; %i18n; %events;">
+
+<!ENTITY % align "align (left|center|right|justify) #IMPLIED"
+ -- default is left for ltr paragraphs, right for rtl --
+ >
+
+<!--=================== Text Markup ======================================-->
+
+<!ENTITY % fontstyle
+ "TT | I | B | U | S | STRIKE | BIG | SMALL">
+
+<!ENTITY % phrase "EM | STRONG | DFN | CODE |
+ SAMP | KBD | VAR | CITE | ABBR | ACRONYM" >
+
+<!ENTITY % special
+ "A | IMG | APPLET | OBJECT | FONT | BASEFONT | BR | SCRIPT |
+ MAP | Q | SUB | SUP | SPAN | BDO | IFRAME">
+
+<!ENTITY % formctrl "INPUT | SELECT | TEXTAREA | LABEL | BUTTON">
+
+<!-- %inline; covers inline or "text-level" elements -->
+<!ENTITY % inline "#PCDATA | %fontstyle; | %phrase; | %special; | %formctrl;">
+
+<!ELEMENT (%fontstyle;|%phrase;) - - (%inline;)*>
+<!ATTLIST (%fontstyle;|%phrase;)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT (SUB|SUP) - - (%inline;)* -- subscript, superscript -->
+<!ATTLIST (SUB|SUP)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT SPAN - - (%inline;)* -- generic language/style container -->
+<!ATTLIST SPAN
+ %attrs; -- %coreattrs, %i18n, %events --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT BDO - - (%inline;)* -- I18N BiDi over-ride -->
+<!ATTLIST BDO
+ %coreattrs; -- id, class, style, title --
+ lang %LanguageCode; #IMPLIED -- language code --
+ dir (ltr|rtl) #REQUIRED -- directionality --
+ >
+
+<!ELEMENT BASEFONT - O EMPTY -- base font size -->
+<!ATTLIST BASEFONT
+ id ID #IMPLIED -- document-wide unique id --
+ size CDATA #REQUIRED -- base font size for FONT elements --
+ color %Color; #IMPLIED -- text color --
+ face CDATA #IMPLIED -- comma separated list of font names --
+ >
+
+<!ELEMENT FONT - - (%inline;)* -- local change to font -->
+<!ATTLIST FONT
+ %coreattrs; -- id, class, style, title --
+ %i18n; -- lang, dir --
+ size CDATA #IMPLIED -- [+|-]nn e.g. size="+1", size="4" --
+ color %Color; #IMPLIED -- text color --
+ face CDATA #IMPLIED -- comma separated list of font names --
+ >
+
+<!ELEMENT BR - O EMPTY -- forced line break -->
+<!ATTLIST BR
+ %coreattrs; -- id, class, style, title --
+ clear (left|all|right|none) none -- control of text flow --
+ >
+
+<!--================== HTML content models ===============================-->
+
+<!--
+ HTML has two basic content models:
+
+ %inline; character level elements and text strings
+ %block; block-like elements e.g. paragraphs and lists
+-->
+
+<!ENTITY % block
+ "P | %heading; | %list; | %preformatted; | DL | DIV | CENTER |
+ NOSCRIPT | NOFRAMES | BLOCKQUOTE | FORM | ISINDEX | HR |
+ TABLE | FIELDSET | ADDRESS">
+
+<!ENTITY % flow "%block; | %inline;">
+
+<!--=================== Document Body ====================================-->
+
+<!ELEMENT BODY O O (%flow;)* +(INS|DEL) -- document body -->
+<!ATTLIST BODY
+ %attrs; -- %coreattrs, %i18n, %events --
+ onload %Script; #IMPLIED -- the document has been loaded --
+ onunload %Script; #IMPLIED -- the document has been removed --
+ background %URI; #IMPLIED -- texture tile for document
+ background --
+ %bodycolors; -- bgcolor, text, link, vlink, alink --
+ >
+
+<!ELEMENT ADDRESS - - ((%inline;)|P)* -- information on author -->
+<!ATTLIST ADDRESS
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT DIV - - (%flow;)* -- generic language/style container -->
+<!ATTLIST DIV
+ %attrs; -- %coreattrs, %i18n, %events --
+ %align; -- align, text alignment --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT CENTER - - (%flow;)* -- shorthand for DIV align=center -->
+<!ATTLIST CENTER
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--================== The Anchor Element ================================-->
+
+<!ENTITY % Shape "(rect|circle|poly|default)">
+<!ENTITY % Coords "CDATA" -- comma separated list of lengths -->
+
+<!ELEMENT A - - (%inline;)* -(A) -- anchor -->
+<!ATTLIST A
+ %attrs; -- %coreattrs, %i18n, %events --
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ type %ContentType; #IMPLIED -- advisory content type --
+ name CDATA #IMPLIED -- named link end --
+ href %URI; #IMPLIED -- URI for linked resource --
+ hreflang %LanguageCode; #IMPLIED -- language code --
+ target %FrameTarget; #IMPLIED -- render in this frame --
+ rel %LinkTypes; #IMPLIED -- forward link types --
+ rev %LinkTypes; #IMPLIED -- reverse link types --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ shape %Shape; rect -- for use with client-side image maps --
+ coords %Coords; #IMPLIED -- for use with client-side image maps --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!--================== Client-side image maps ============================-->
+
+<!-- These can be placed in the same document or grouped in a
+ separate document although this isn't yet widely supported -->
+
+<!ELEMENT MAP - - ((%block;)+ | AREA+) -- client-side image map -->
+<!ATTLIST MAP
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #REQUIRED -- for reference by usemap --
+ >
+
+<!ELEMENT AREA - O EMPTY -- client-side image map area -->
+<!ATTLIST AREA
+ %attrs; -- %coreattrs, %i18n, %events --
+ shape %Shape; rect -- controls interpretation of coords --
+ coords %Coords; #IMPLIED -- comma separated list of lengths --
+ href %URI; #IMPLIED -- URI for linked resource --
+ target %FrameTarget; #IMPLIED -- render in this frame --
+ nohref (nohref) #IMPLIED -- this region has no action --
+ alt %Text; #REQUIRED -- short description --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!--================== The LINK Element ==================================-->
+
+<!--
+ Relationship values can be used in principle:
+
+ a) for document specific toolbars/menus when used
+ with the LINK element in document head e.g.
+ start, contents, previous, next, index, end, help
+ b) to link to a separate style sheet (rel=stylesheet)
+ c) to make a link to a script (rel=script)
+ d) by stylesheets to control how collections of
+ html nodes are rendered into printed documents
+ e) to make a link to a printable version of this document
+ e.g. a postscript or pdf version (rel=alternate media=print)
+-->
+
+<!ELEMENT LINK - O EMPTY -- a media-independent link -->
+<!ATTLIST LINK
+ %attrs; -- %coreattrs, %i18n, %events --
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ href %URI; #IMPLIED -- URI for linked resource --
+ hreflang %LanguageCode; #IMPLIED -- language code --
+ type %ContentType; #IMPLIED -- advisory content type --
+ rel %LinkTypes; #IMPLIED -- forward link types --
+ rev %LinkTypes; #IMPLIED -- reverse link types --
+ media %MediaDesc; #IMPLIED -- for rendering on these media --
+ target %FrameTarget; #IMPLIED -- render in this frame --
+ >
+
+<!--=================== Images ===========================================-->
+
+<!-- Length defined in strict DTD for cellpadding/cellspacing -->
+<!ENTITY % Length "CDATA" -- nn for pixels or nn% for percentage length -->
+<!ENTITY % MultiLength "CDATA" -- pixel, percentage, or relative -->
+
+<!ENTITY % MultiLengths "CDATA" -- comma-separated list of MultiLength -->
+
+<!ENTITY % Pixels "CDATA" -- integer representing length in pixels -->
+
+<!ENTITY % IAlign "(top|middle|bottom|left|right)" -- center? -->
+
+<!-- To avoid problems with text-only UAs as well as
+ to make image content understandable and navigable
+ to users of non-visual UAs, you need to provide
+ a description with ALT, and avoid server-side image maps -->
+<!ELEMENT IMG - O EMPTY -- Embedded image -->
+<!ATTLIST IMG
+ %attrs; -- %coreattrs, %i18n, %events --
+ src %URI; #REQUIRED -- URI of image to embed --
+ alt %Text; #REQUIRED -- short description --
+ longdesc %URI; #IMPLIED -- link to long description
+ (complements alt) --
+ height %Length; #IMPLIED -- override height --
+ width %Length; #IMPLIED -- override width --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ ismap (ismap) #IMPLIED -- use server-side image map --
+ align %IAlign; #IMPLIED -- vertical or horizontal alignment --
+ border %Length; #IMPLIED -- link border width --
+ hspace %Pixels; #IMPLIED -- horizontal gutter --
+ vspace %Pixels; #IMPLIED -- vertical gutter --
+ >
+
+<!-- USEMAP points to a MAP element which may be in this document
+ or an external document, although the latter is not widely supported -->
+
+<!--==================== OBJECT ======================================-->
+<!--
+ OBJECT is used to embed objects as part of HTML pages
+ PARAM elements should precede other content. SGML mixed content
+ model technicality precludes specifying this formally ...
+-->
+
+<!ELEMENT OBJECT - - (PARAM | %flow;)*
+ -- generic embedded object -->
+<!ATTLIST OBJECT
+ %attrs; -- %coreattrs, %i18n, %events --
+ declare (declare) #IMPLIED -- declare but don't instantiate flag --
+ classid %URI; #IMPLIED -- identifies an implementation --
+ codebase %URI; #IMPLIED -- base URI for classid, data, archive--
+ data %URI; #IMPLIED -- reference to object's data --
+ type %ContentType; #IMPLIED -- content type for data --
+ codetype %ContentType; #IMPLIED -- content type for code --
+ archive %URI; #IMPLIED -- space separated archive list --
+ standby %Text; #IMPLIED -- message to show while loading --
+ height %Length; #IMPLIED -- override height --
+ width %Length; #IMPLIED -- override width --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ name CDATA #IMPLIED -- submit as part of form --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ align %IAlign; #IMPLIED -- vertical or horizontal alignment --
+ border %Length; #IMPLIED -- link border width --
+ hspace %Pixels; #IMPLIED -- horizontal gutter --
+ vspace %Pixels; #IMPLIED -- vertical gutter --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT PARAM - O EMPTY -- named property value -->
+<!ATTLIST PARAM
+ id ID #IMPLIED -- document-wide unique id --
+ name CDATA #REQUIRED -- property name --
+ value CDATA #IMPLIED -- property value --
+ valuetype (DATA|REF|OBJECT) DATA -- How to interpret value --
+ type %ContentType; #IMPLIED -- content type for value
+ when valuetype=ref --
+ >
+
+<!--=================== Java APPLET ==================================-->
+<!--
+ One of code or object attributes must be present.
+ Place PARAM elements before other content.
+-->
+<!ELEMENT APPLET - - (PARAM | %flow;)* -- Java applet -->
+<!ATTLIST APPLET
+ %coreattrs; -- id, class, style, title --
+ codebase %URI; #IMPLIED -- optional base URI for applet --
+ archive CDATA #IMPLIED -- comma separated archive list --
+ code CDATA #IMPLIED -- applet class file --
+ object CDATA #IMPLIED -- serialized applet file --
+ alt %Text; #IMPLIED -- short description --
+ name CDATA #IMPLIED -- allows applets to find each other --
+ width %Length; #REQUIRED -- initial width --
+ height %Length; #REQUIRED -- initial height --
+ align %IAlign; #IMPLIED -- vertical or horizontal alignment --
+ hspace %Pixels; #IMPLIED -- horizontal gutter --
+ vspace %Pixels; #IMPLIED -- vertical gutter --
+ >
+
+<!--=================== Horizontal Rule ==================================-->
+
+<!ELEMENT HR - O EMPTY -- horizontal rule -->
+<!ATTLIST HR
+ %coreattrs; -- id, class, style, title --
+ %events;
+ align (left|center|right) #IMPLIED
+ noshade (noshade) #IMPLIED
+ size %Pixels; #IMPLIED
+ width %Length; #IMPLIED
+ >
+
+<!--=================== Paragraphs =======================================-->
+
+<!ELEMENT P - O (%inline;)* -- paragraph -->
+<!ATTLIST P
+ %attrs; -- %coreattrs, %i18n, %events --
+ %align; -- align, text alignment --
+ >
+
+<!--=================== Headings =========================================-->
+
+<!--
+ There are six levels of headings from H1 (the most important)
+ to H6 (the least important).
+-->
+
+<!ELEMENT (%heading;) - - (%inline;)* -- heading -->
+<!ATTLIST (%heading;)
+ %attrs; -- %coreattrs, %i18n, %events --
+ %align; -- align, text alignment --
+ >
+
+<!--=================== Preformatted Text ================================-->
+
+<!-- excludes markup for images and changes in font size -->
+<!ENTITY % pre.exclusion "IMG|OBJECT|APPLET|BIG|SMALL|SUB|SUP|FONT|BASEFONT">
+
+<!ELEMENT PRE - - (%inline;)* -(%pre.exclusion;) -- preformatted text -->
+<!ATTLIST PRE
+ %attrs; -- %coreattrs, %i18n, %events --
+ width NUMBER #IMPLIED
+ >
+
+<!--===================== Inline Quotes ==================================-->
+
+<!ELEMENT Q - - (%inline;)* -- short inline quotation -->
+<!ATTLIST Q
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- URI for source document or msg --
+ >
+
+<!--=================== Block-like Quotes ================================-->
+
+<!ELEMENT BLOCKQUOTE - - (%flow;)* -- long quotation -->
+<!ATTLIST BLOCKQUOTE
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- URI for source document or msg --
+ >
+
+<!--=================== Inserted/Deleted Text ============================-->
+
+
+<!-- INS/DEL are handled by inclusion on BODY -->
+<!ELEMENT (INS|DEL) - - (%flow;)* -- inserted text, deleted text -->
+<!ATTLIST (INS|DEL)
+ %attrs; -- %coreattrs, %i18n, %events --
+ cite %URI; #IMPLIED -- info on reason for change --
+ datetime %Datetime; #IMPLIED -- date and time of change --
+ >
+
+<!--=================== Lists ============================================-->
+
+<!-- definition lists - DT for term, DD for its definition -->
+
+<!ELEMENT DL - - (DT|DD)+ -- definition list -->
+<!ATTLIST DL
+ %attrs; -- %coreattrs, %i18n, %events --
+ compact (compact) #IMPLIED -- reduced interitem spacing --
+ >
+
+<!ELEMENT DT - O (%inline;)* -- definition term -->
+<!ELEMENT DD - O (%flow;)* -- definition description -->
+<!ATTLIST (DT|DD)
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!-- Ordered lists (OL) Numbering style
+
+ 1 arablic numbers 1, 2, 3, ...
+ a lower alpha a, b, c, ...
+ A upper alpha A, B, C, ...
+ i lower roman i, ii, iii, ...
+ I upper roman I, II, III, ...
+
+ The style is applied to the sequence number which by default
+ is reset to 1 for the first list item in an ordered list.
+
+ This can't be expressed directly in SGML due to case folding.
+-->
+
+<!ENTITY % OLStyle "CDATA" -- constrained to: "(1|a|A|i|I)" -->
+
+<!ELEMENT OL - - (LI)+ -- ordered list -->
+<!ATTLIST OL
+ %attrs; -- %coreattrs, %i18n, %events --
+ type %OLStyle; #IMPLIED -- numbering style --
+ compact (compact) #IMPLIED -- reduced interitem spacing --
+ start NUMBER #IMPLIED -- starting sequence number --
+ >
+
+<!-- Unordered Lists (UL) bullet styles -->
+<!ENTITY % ULStyle "(disc|square|circle)">
+
+<!ELEMENT UL - - (LI)+ -- unordered list -->
+<!ATTLIST UL
+ %attrs; -- %coreattrs, %i18n, %events --
+ type %ULStyle; #IMPLIED -- bullet style --
+ compact (compact) #IMPLIED -- reduced interitem spacing --
+ >
+
+<!ELEMENT (DIR|MENU) - - (LI)+ -(%block;) -- directory list, menu list -->
+<!ATTLIST DIR
+ %attrs; -- %coreattrs, %i18n, %events --
+ compact (compact) #IMPLIED
+ >
+<!ATTLIST MENU
+ %attrs; -- %coreattrs, %i18n, %events --
+ compact (compact) #IMPLIED
+ >
+
+<!ENTITY % LIStyle "CDATA" -- constrained to: "(%ULStyle;|%OLStyle;)" -->
+
+<!ELEMENT LI - O (%flow;)* -- list item -->
+<!ATTLIST LI
+ %attrs; -- %coreattrs, %i18n, %events --
+ type %LIStyle; #IMPLIED -- list item style --
+ value NUMBER #IMPLIED -- reset sequence number --
+ >
+
+<!--================ Forms ===============================================-->
+<!ELEMENT FORM - - (%flow;)* -(FORM) -- interactive form -->
+<!ATTLIST FORM
+ %attrs; -- %coreattrs, %i18n, %events --
+ action %URI; #REQUIRED -- server-side form handler --
+ method (GET|POST) GET -- HTTP method used to submit the form--
+ enctype %ContentType; "application/x-www-form-urlencoded"
+ onsubmit %Script; #IMPLIED -- the form was submitted --
+ onreset %Script; #IMPLIED -- the form was reset --
+ target %FrameTarget; #IMPLIED -- render in this frame --
+ accept-charset %Charsets; #IMPLIED -- list of supported charsets --
+ >
+
+<!-- Each label must not contain more than ONE field -->
+<!ELEMENT LABEL - - (%inline;)* -(LABEL) -- form field label text -->
+<!ATTLIST LABEL
+ %attrs; -- %coreattrs, %i18n, %events --
+ for IDREF #IMPLIED -- matches field ID value --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ >
+
+<!ENTITY % InputType
+ "(TEXT | PASSWORD | CHECKBOX |
+ RADIO | SUBMIT | RESET |
+ FILE | HIDDEN | IMAGE | BUTTON)"
+ >
+
+<!-- attribute name required for all but submit & reset -->
+<!ELEMENT INPUT - O EMPTY -- form control -->
+<!ATTLIST INPUT
+ %attrs; -- %coreattrs, %i18n, %events --
+ type %InputType; TEXT -- what kind of widget is needed --
+ name CDATA #IMPLIED -- submit as part of form --
+ value CDATA #IMPLIED -- required for radio and checkboxes --
+ checked (checked) #IMPLIED -- for radio buttons and check boxes --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ readonly (readonly) #IMPLIED -- for text and passwd --
+ size CDATA #IMPLIED -- specific to each type of field --
+ maxlength NUMBER #IMPLIED -- max chars for text fields --
+ src %URI; #IMPLIED -- for fields with images --
+ alt CDATA #IMPLIED -- short description --
+ usemap %URI; #IMPLIED -- use client-side image map --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onselect %Script; #IMPLIED -- some text was selected --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ accept %ContentTypes; #IMPLIED -- list of MIME types for file upload --
+ align %IAlign; #IMPLIED -- vertical or horizontal alignment --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT SELECT - - (OPTGROUP|OPTION)+ -- option selector -->
+<!ATTLIST SELECT
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED -- field name --
+ size NUMBER #IMPLIED -- rows visible --
+ multiple (multiple) #IMPLIED -- default is single selection --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!ELEMENT OPTGROUP - - (OPTION)+ -- option group -->
+<!ATTLIST OPTGROUP
+ %attrs; -- %coreattrs, %i18n, %events --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ label %Text; #REQUIRED -- for use in hierarchical menus --
+ >
+
+<!ELEMENT OPTION - O (#PCDATA) -- selectable choice -->
+<!ATTLIST OPTION
+ %attrs; -- %coreattrs, %i18n, %events --
+ selected (selected) #IMPLIED
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ label %Text; #IMPLIED -- for use in hierarchical menus --
+ value CDATA #IMPLIED -- defaults to element content --
+ >
+
+<!ELEMENT TEXTAREA - - (#PCDATA) -- multi-line text field -->
+<!ATTLIST TEXTAREA
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED
+ rows NUMBER #REQUIRED
+ cols NUMBER #REQUIRED
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ readonly (readonly) #IMPLIED
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ onselect %Script; #IMPLIED -- some text was selected --
+ onchange %Script; #IMPLIED -- the element value was changed --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!--
+ #PCDATA is to solve the mixed content problem,
+ per specification only whitespace is allowed there!
+ -->
+<!ELEMENT FIELDSET - - (#PCDATA,LEGEND,(%flow;)*) -- form control group -->
+<!ATTLIST FIELDSET
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!ELEMENT LEGEND - - (%inline;)* -- fieldset legend -->
+<!ENTITY % LAlign "(top|bottom|left|right)">
+
+<!ATTLIST LEGEND
+ %attrs; -- %coreattrs, %i18n, %events --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ align %LAlign; #IMPLIED -- relative to fieldset --
+ >
+
+<!ELEMENT BUTTON - -
+ (%flow;)* -(A|%formctrl;|FORM|ISINDEX|FIELDSET|IFRAME)
+ -- push button -->
+<!ATTLIST BUTTON
+ %attrs; -- %coreattrs, %i18n, %events --
+ name CDATA #IMPLIED
+ value CDATA #IMPLIED -- sent to server when submitted --
+ type (button|submit|reset) submit -- for use as form button --
+ disabled (disabled) #IMPLIED -- unavailable in this context --
+ tabindex NUMBER #IMPLIED -- position in tabbing order --
+ accesskey %Character; #IMPLIED -- accessibility key character --
+ onfocus %Script; #IMPLIED -- the element got the focus --
+ onblur %Script; #IMPLIED -- the element lost the focus --
+ %reserved; -- reserved for possible future use --
+ >
+
+<!--======================= Tables =======================================-->
+
+<!-- IETF HTML table standard, see [RFC1942] -->
+
+<!--
+ The BORDER attribute sets the thickness of the frame around the
+ table. The default units are screen pixels.
+
+ The FRAME attribute specifies which parts of the frame around
+ the table should be rendered. The values are not the same as
+ CALS to avoid a name clash with the VALIGN attribute.
+
+ The value "border" is included for backwards compatibility with
+ <TABLE BORDER> which yields frame=border and border=implied
+ For <TABLE BORDER=1> you get border=1 and frame=implied. In this
+ case, it is appropriate to treat this as frame=border for backwards
+ compatibility with deployed browsers.
+-->
+<!ENTITY % TFrame "(void|above|below|hsides|lhs|rhs|vsides|box|border)">
+
+<!--
+ The RULES attribute defines which rules to draw between cells:
+
+ If RULES is absent then assume:
+ "none" if BORDER is absent or BORDER=0 otherwise "all"
+-->
+
+<!ENTITY % TRules "(none | groups | rows | cols | all)">
+
+<!-- horizontal placement of table relative to document -->
+<!ENTITY % TAlign "(left|center|right)">
+
+<!-- horizontal alignment attributes for cell contents -->
+<!ENTITY % cellhalign
+ "align (left|center|right|justify|char) #IMPLIED
+ char %Character; #IMPLIED -- alignment char, e.g. char=':' --
+ charoff %Length; #IMPLIED -- offset for alignment char --"
+ >
+
+<!-- vertical alignment attributes for cell contents -->
+<!ENTITY % cellvalign
+ "valign (top|middle|bottom|baseline) #IMPLIED"
+ >
+
+<!ELEMENT TABLE - -
+ (CAPTION?, (COL*|COLGROUP*), THEAD?, TFOOT?, TBODY+)>
+<!ELEMENT CAPTION - - (%inline;)* -- table caption -->
+<!ELEMENT THEAD - O (TR)+ -- table header -->
+<!ELEMENT TFOOT - O (TR)+ -- table footer -->
+<!ELEMENT TBODY O O (TR)+ -- table body -->
+<!ELEMENT COLGROUP - O (col)* -- table column group -->
+<!ELEMENT COL - O EMPTY -- table column -->
+<!ELEMENT TR - O (TH|TD)+ -- table row -->
+<!ELEMENT (TH|TD) - O (%flow;)* -- table header cell, table data cell-->
+
+<!ATTLIST TABLE -- table element --
+ %attrs; -- %coreattrs, %i18n, %events --
+ summary %Text; #IMPLIED -- purpose/structure for speech output--
+ width %Length; #IMPLIED -- table width --
+ border %Pixels; #IMPLIED -- controls frame width around table --
+ frame %TFrame; #IMPLIED -- which parts of frame to render --
+ rules %TRules; #IMPLIED -- rulings between rows and cols --
+ cellspacing %Length; #IMPLIED -- spacing between cells --
+ cellpadding %Length; #IMPLIED -- spacing within cells --
+ align %TAlign; #IMPLIED -- table position relative to window --
+ bgcolor %Color; #IMPLIED -- background color for cells --
+ %reserved; -- reserved for possible future use --
+ datapagesize CDATA #IMPLIED -- reserved for possible future use --
+ >
+
+<!ENTITY % CAlign "(top|bottom|left|right)">
+
+<!ATTLIST CAPTION
+ %attrs; -- %coreattrs, %i18n, %events --
+ align %CAlign; #IMPLIED -- relative to table --
+ >
+
+<!--
+COLGROUP groups a set of COL elements. It allows you to group
+several semantically related columns together.
+-->
+<!ATTLIST COLGROUP
+ %attrs; -- %coreattrs, %i18n, %events --
+ span NUMBER 1 -- default number of columns in group --
+ width %MultiLength; #IMPLIED -- default width for enclosed COLs --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!--
+ COL elements define the alignment properties for cells in
+ one or more columns.
+
+ The WIDTH attribute specifies the width of the columns, e.g.
+
+ width=64 width in screen pixels
+ width=0.5* relative width of 0.5
+
+ The SPAN attribute causes the attributes of one
+ COL element to apply to more than one column.
+-->
+<!ATTLIST COL -- column groups and properties --
+ %attrs; -- %coreattrs, %i18n, %events --
+ span NUMBER 1 -- COL attributes affect N columns --
+ width %MultiLength; #IMPLIED -- column width specification --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!--
+ Use THEAD to duplicate headers when breaking table
+ across page boundaries, or for static headers when
+ TBODY sections are rendered in scrolling panel.
+
+ Use TFOOT to duplicate footers when breaking table
+ across page boundaries, or for static footers when
+ TBODY sections are rendered in scrolling panel.
+
+ Use multiple TBODY sections when rules are needed
+ between groups of table rows.
+-->
+<!ATTLIST (THEAD|TBODY|TFOOT) -- table section --
+ %attrs; -- %coreattrs, %i18n, %events --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ >
+
+<!ATTLIST TR -- table row --
+ %attrs; -- %coreattrs, %i18n, %events --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ bgcolor %Color; #IMPLIED -- background color for row --
+ >
+
+
+<!-- Scope is simpler than axes attribute for common tables -->
+<!ENTITY % Scope "(row|col|rowgroup|colgroup)">
+
+<!-- TH is for headers, TD for data, but for cells acting as both use TD -->
+<!ATTLIST (TH|TD) -- header or data cell --
+ %attrs; -- %coreattrs, %i18n, %events --
+ abbr %Text; #IMPLIED -- abbreviation for header cell --
+ axis CDATA #IMPLIED -- names groups of related headers--
+ headers IDREFS #IMPLIED -- list of id's for header cells --
+ scope %Scope; #IMPLIED -- scope covered by header cells --
+ rowspan NUMBER 1 -- number of rows spanned by cell --
+ colspan NUMBER 1 -- number of cols spanned by cell --
+ %cellhalign; -- horizontal alignment in cells --
+ %cellvalign; -- vertical alignment in cells --
+ nowrap (nowrap) #IMPLIED -- suppress word wrap --
+ bgcolor %Color; #IMPLIED -- cell background color --
+ width %Pixels; #IMPLIED -- width for cell --
+ height %Pixels; #IMPLIED -- height for cell --
+ >
+
+<!--================== Document Frames ===================================-->
+
+<!--
+ The content model for HTML documents depends on whether the HEAD is
+ followed by a FRAMESET or BODY element. The widespread omission of
+ the BODY start tag makes it impractical to define the content model
+ without the use of a marked section.
+-->
+
+<!-- Feature Switch for frameset documents -->
+<!ENTITY % HTML.Frameset "IGNORE">
+
+<![ %HTML.Frameset; [
+<!ELEMENT FRAMESET - - ((FRAMESET|FRAME)+ & NOFRAMES?) -- window subdivision-->
+<!ATTLIST FRAMESET
+ %coreattrs; -- id, class, style, title --
+ rows %MultiLengths; #IMPLIED -- list of lengths,
+ default: 100% (1 row) --
+ cols %MultiLengths; #IMPLIED -- list of lengths,
+ default: 100% (1 col) --
+ onload %Script; #IMPLIED -- all the frames have been loaded --
+ onunload %Script; #IMPLIED -- all the frames have been removed --
+ >
+]]>
+
+<![ %HTML.Frameset; [
+<!-- reserved frame names start with "_" otherwise starts with letter -->
+<!ELEMENT FRAME - O EMPTY -- subwindow -->
+<!ATTLIST FRAME
+ %coreattrs; -- id, class, style, title --
+ longdesc %URI; #IMPLIED -- link to long description
+ (complements title) --
+ name CDATA #IMPLIED -- name of frame for targetting --
+ src %URI; #IMPLIED -- source of frame content --
+ frameborder (1|0) 1 -- request frame borders? --
+ marginwidth %Pixels; #IMPLIED -- margin widths in pixels --
+ marginheight %Pixels; #IMPLIED -- margin height in pixels --
+ noresize (noresize) #IMPLIED -- allow users to resize frames? --
+ scrolling (yes|no|auto) auto -- scrollbar or none --
+ >
+]]>
+
+<!ELEMENT IFRAME - - (%flow;)* -- inline subwindow -->
+<!ATTLIST IFRAME
+ %coreattrs; -- id, class, style, title --
+ longdesc %URI; #IMPLIED -- link to long description
+ (complements title) --
+ name CDATA #IMPLIED -- name of frame for targetting --
+ src %URI; #IMPLIED -- source of frame content --
+ frameborder (1|0) 1 -- request frame borders? --
+ marginwidth %Pixels; #IMPLIED -- margin widths in pixels --
+ marginheight %Pixels; #IMPLIED -- margin height in pixels --
+ scrolling (yes|no|auto) auto -- scrollbar or none --
+ align %IAlign; #IMPLIED -- vertical or horizontal alignment --
+ height %Length; #IMPLIED -- frame height --
+ width %Length; #IMPLIED -- frame width --
+ >
+
+<![ %HTML.Frameset; [
+<!ENTITY % noframes.content "(BODY) -(NOFRAMES)">
+]]>
+
+<!ENTITY % noframes.content "(%flow;)*">
+
+<!ELEMENT NOFRAMES - - %noframes.content;
+ -- alternate content container for non frame-based rendering -->
+<!ATTLIST NOFRAMES
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--================ Document Head =======================================-->
+<!-- %head.misc; defined earlier on as "SCRIPT|STYLE|META|LINK|OBJECT" -->
+<!ENTITY % head.content "TITLE & ISINDEX? & BASE?">
+
+<!ELEMENT HEAD O O (%head.content;) +(%head.misc;) -- document head -->
+<!ATTLIST HEAD
+ %i18n; -- lang, dir --
+ profile %URI; #IMPLIED -- named dictionary of meta info --
+ >
+
+<!-- The TITLE element is not considered part of the flow of text.
+ It should be displayed, for example as the page header or
+ window title. Exactly one title is required per document.
+ -->
+<!ELEMENT TITLE - - (#PCDATA) -(%head.misc;) -- document title -->
+<!ATTLIST TITLE %i18n>
+
+<!ELEMENT ISINDEX - O EMPTY -- single line prompt -->
+<!ATTLIST ISINDEX
+ %coreattrs; -- id, class, style, title --
+ %i18n; -- lang, dir --
+ prompt %Text; #IMPLIED -- prompt message -->
+
+<!ELEMENT BASE - O EMPTY -- document base URI -->
+<!ATTLIST BASE
+ href %URI; #IMPLIED -- URI that acts as base URI --
+ target %FrameTarget; #IMPLIED -- render in this frame --
+ >
+
+<!ELEMENT META - O EMPTY -- generic metainformation -->
+<!ATTLIST META
+ %i18n; -- lang, dir, for use with content --
+ http-equiv NAME #IMPLIED -- HTTP response header name --
+ name NAME #IMPLIED -- metainformation name --
+ content CDATA #REQUIRED -- associated information --
+ scheme CDATA #IMPLIED -- select form of content --
+ >
+
+<!ELEMENT STYLE - - %StyleSheet -- style info -->
+<!ATTLIST STYLE
+ %i18n; -- lang, dir, for use with title --
+ type %ContentType; #REQUIRED -- content type of style language --
+ media %MediaDesc; #IMPLIED -- designed for use with these media --
+ title %Text; #IMPLIED -- advisory title --
+ >
+
+<!ELEMENT SCRIPT - - %Script; -- script statements -->
+<!ATTLIST SCRIPT
+ charset %Charset; #IMPLIED -- char encoding of linked resource --
+ type %ContentType; #REQUIRED -- content type of script language --
+ language CDATA #IMPLIED -- predefined script language name --
+ src %URI; #IMPLIED -- URI for an external script --
+ defer (defer) #IMPLIED -- UA may defer execution of script --
+ event CDATA #IMPLIED -- reserved for possible future use --
+ for %URI; #IMPLIED -- reserved for possible future use --
+ >
+
+<!ELEMENT NOSCRIPT - - (%flow;)*
+ -- alternate content container for non script-based rendering -->
+<!ATTLIST NOSCRIPT
+ %attrs; -- %coreattrs, %i18n, %events --
+ >
+
+<!--================ Document Structure ==================================-->
+<!ENTITY % version "version CDATA #FIXED '%HTML.Version;'">
+
+<![ %HTML.Frameset; [
+<!ENTITY % html.content "HEAD, FRAMESET">
+]]>
+
+<!ENTITY % html.content "HEAD, BODY">
+
+<!ELEMENT HTML O O (%html.content;) -- document root element -->
+<!ATTLIST HTML
+ %i18n; -- lang, dir --
+ %version;
+ >
diff --git a/tests/dtds/HTML4.soc b/tests/dtds/HTML4.soc
new file mode 100644
index 0000000..ec4825f
--- /dev/null
+++ b/tests/dtds/HTML4.soc
@@ -0,0 +1,9 @@
+OVERRIDE YES
+SGMLDECL HTML4.dcl
+DOCTYPE HTML HTML4.dtd
+PUBLIC "-//W3C//DTD HTML 4.0//EN" HTML4-s.dtd
+PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" HTML4.dtd
+PUBLIC "-//W3C//DTD HTML 4.0 Frameset//EN" HTML4-f.dtd
+PUBLIC "-//W3C//ENTITIES Latin1//EN//HTML" HTMLlat1.ent
+PUBLIC "-//W3C//ENTITIES Special//EN//HTML" HTMLspec.ent
+PUBLIC "-//W3C//ENTITIES Symbols//EN//HTML" HTMLsym.ent
diff --git a/tests/dtds/HTMLlat1.ent b/tests/dtds/HTMLlat1.ent
new file mode 100644
index 0000000..7632023
--- /dev/null
+++ b/tests/dtds/HTMLlat1.ent
@@ -0,0 +1,195 @@
+<!-- Portions (C) International Organization for Standardization 1986
+ Permission to copy in any form is granted for use with
+ conforming SGML systems and applications as defined in
+ ISO 8879, provided this notice is included in all copies.
+-->
+<!-- Character entity set. Typical invocation:
+ <!ENTITY % HTMLlat1 PUBLIC
+ "-//W3C//ENTITIES Full Latin 1//EN//HTML">
+ %HTMLlat1;
+-->
+
+<!ENTITY nbsp CDATA "&#160;" -- no-break space = non-breaking space,
+ U+00A0 ISOnum -->
+<!ENTITY iexcl CDATA "&#161;" -- inverted exclamation mark, U+00A1 ISOnum -->
+<!ENTITY cent CDATA "&#162;" -- cent sign, U+00A2 ISOnum -->
+<!ENTITY pound CDATA "&#163;" -- pound sign, U+00A3 ISOnum -->
+<!ENTITY curren CDATA "&#164;" -- currency sign, U+00A4 ISOnum -->
+<!ENTITY yen CDATA "&#165;" -- yen sign = yuan sign, U+00A5 ISOnum -->
+<!ENTITY brvbar CDATA "&#166;" -- broken bar = broken vertical bar,
+ U+00A6 ISOnum -->
+<!ENTITY sect CDATA "&#167;" -- section sign, U+00A7 ISOnum -->
+<!ENTITY uml CDATA "&#168;" -- diaeresis = spacing diaeresis,
+ U+00A8 ISOdia -->
+<!ENTITY copy CDATA "&#169;" -- copyright sign, U+00A9 ISOnum -->
+<!ENTITY ordf CDATA "&#170;" -- feminine ordinal indicator, U+00AA ISOnum -->
+<!ENTITY laquo CDATA "&#171;" -- left-pointing double angle quotation mark
+ = left pointing guillemet, U+00AB ISOnum -->
+<!ENTITY not CDATA "&#172;" -- not sign = discretionary hyphen,
+ U+00AC ISOnum -->
+<!ENTITY shy CDATA "&#173;" -- soft hyphen = discretionary hyphen,
+ U+00AD ISOnum -->
+<!ENTITY reg CDATA "&#174;" -- registered sign = registered trade mark sign,
+ U+00AE ISOnum -->
+<!ENTITY macr CDATA "&#175;" -- macron = spacing macron = overline
+ = APL overbar, U+00AF ISOdia -->
+<!ENTITY deg CDATA "&#176;" -- degree sign, U+00B0 ISOnum -->
+<!ENTITY plusmn CDATA "&#177;" -- plus-minus sign = plus-or-minus sign,
+ U+00B1 ISOnum -->
+<!ENTITY sup2 CDATA "&#178;" -- superscript two = superscript digit two
+ = squared, U+00B2 ISOnum -->
+<!ENTITY sup3 CDATA "&#179;" -- superscript three = superscript digit three
+ = cubed, U+00B3 ISOnum -->
+<!ENTITY acute CDATA "&#180;" -- acute accent = spacing acute,
+ U+00B4 ISOdia -->
+<!ENTITY micro CDATA "&#181;" -- micro sign, U+00B5 ISOnum -->
+<!ENTITY para CDATA "&#182;" -- pilcrow sign = paragraph sign,
+ U+00B6 ISOnum -->
+<!ENTITY middot CDATA "&#183;" -- middle dot = Georgian comma
+ = Greek middle dot, U+00B7 ISOnum -->
+<!ENTITY cedil CDATA "&#184;" -- cedilla = spacing cedilla, U+00B8 ISOdia -->
+<!ENTITY sup1 CDATA "&#185;" -- superscript one = superscript digit one,
+ U+00B9 ISOnum -->
+<!ENTITY ordm CDATA "&#186;" -- masculine ordinal indicator,
+ U+00BA ISOnum -->
+<!ENTITY raquo CDATA "&#187;" -- right-pointing double angle quotation mark
+ = right pointing guillemet, U+00BB ISOnum -->
+<!ENTITY frac14 CDATA "&#188;" -- vulgar fraction one quarter
+ = fraction one quarter, U+00BC ISOnum -->
+<!ENTITY frac12 CDATA "&#189;" -- vulgar fraction one half
+ = fraction one half, U+00BD ISOnum -->
+<!ENTITY frac34 CDATA "&#190;" -- vulgar fraction three quarters
+ = fraction three quarters, U+00BE ISOnum -->
+<!ENTITY iquest CDATA "&#191;" -- inverted question mark
+ = turned question mark, U+00BF ISOnum -->
+<!ENTITY Agrave CDATA "&#192;" -- latin capital letter A with grave
+ = latin capital letter A grave,
+ U+00C0 ISOlat1 -->
+<!ENTITY Aacute CDATA "&#193;" -- latin capital letter A with acute,
+ U+00C1 ISOlat1 -->
+<!ENTITY Acirc CDATA "&#194;" -- latin capital letter A with circumflex,
+ U+00C2 ISOlat1 -->
+<!ENTITY Atilde CDATA "&#195;" -- latin capital letter A with tilde,
+ U+00C3 ISOlat1 -->
+<!ENTITY Auml CDATA "&#196;" -- latin capital letter A with diaeresis,
+ U+00C4 ISOlat1 -->
+<!ENTITY Aring CDATA "&#197;" -- latin capital letter A with ring above
+ = latin capital letter A ring,
+ U+00C5 ISOlat1 -->
+<!ENTITY AElig CDATA "&#198;" -- latin capital letter AE
+ = latin capital ligature AE,
+ U+00C6 ISOlat1 -->
+<!ENTITY Ccedil CDATA "&#199;" -- latin capital letter C with cedilla,
+ U+00C7 ISOlat1 -->
+<!ENTITY Egrave CDATA "&#200;" -- latin capital letter E with grave,
+ U+00C8 ISOlat1 -->
+<!ENTITY Eacute CDATA "&#201;" -- latin capital letter E with acute,
+ U+00C9 ISOlat1 -->
+<!ENTITY Ecirc CDATA "&#202;" -- latin capital letter E with circumflex,
+ U+00CA ISOlat1 -->
+<!ENTITY Euml CDATA "&#203;" -- latin capital letter E with diaeresis,
+ U+00CB ISOlat1 -->
+<!ENTITY Igrave CDATA "&#204;" -- latin capital letter I with grave,
+ U+00CC ISOlat1 -->
+<!ENTITY Iacute CDATA "&#205;" -- latin capital letter I with acute,
+ U+00CD ISOlat1 -->
+<!ENTITY Icirc CDATA "&#206;" -- latin capital letter I with circumflex,
+ U+00CE ISOlat1 -->
+<!ENTITY Iuml CDATA "&#207;" -- latin capital letter I with diaeresis,
+ U+00CF ISOlat1 -->
+<!ENTITY ETH CDATA "&#208;" -- latin capital letter ETH, U+00D0 ISOlat1 -->
+<!ENTITY Ntilde CDATA "&#209;" -- latin capital letter N with tilde,
+ U+00D1 ISOlat1 -->
+<!ENTITY Ograve CDATA "&#210;" -- latin capital letter O with grave,
+ U+00D2 ISOlat1 -->
+<!ENTITY Oacute CDATA "&#211;" -- latin capital letter O with acute,
+ U+00D3 ISOlat1 -->
+<!ENTITY Ocirc CDATA "&#212;" -- latin capital letter O with circumflex,
+ U+00D4 ISOlat1 -->
+<!ENTITY Otilde CDATA "&#213;" -- latin capital letter O with tilde,
+ U+00D5 ISOlat1 -->
+<!ENTITY Ouml CDATA "&#214;" -- latin capital letter O with diaeresis,
+ U+00D6 ISOlat1 -->
+<!ENTITY times CDATA "&#215;" -- multiplication sign, U+00D7 ISOnum -->
+<!ENTITY Oslash CDATA "&#216;" -- latin capital letter O with stroke
+ = latin capital letter O slash,
+ U+00D8 ISOlat1 -->
+<!ENTITY Ugrave CDATA "&#217;" -- latin capital letter U with grave,
+ U+00D9 ISOlat1 -->
+<!ENTITY Uacute CDATA "&#218;" -- latin capital letter U with acute,
+ U+00DA ISOlat1 -->
+<!ENTITY Ucirc CDATA "&#219;" -- latin capital letter U with circumflex,
+ U+00DB ISOlat1 -->
+<!ENTITY Uuml CDATA "&#220;" -- latin capital letter U with diaeresis,
+ U+00DC ISOlat1 -->
+<!ENTITY Yacute CDATA "&#221;" -- latin capital letter Y with acute,
+ U+00DD ISOlat1 -->
+<!ENTITY THORN CDATA "&#222;" -- latin capital letter THORN,
+ U+00DE ISOlat1 -->
+<!ENTITY szlig CDATA "&#223;" -- latin small letter sharp s = ess-zed,
+ U+00DF ISOlat1 -->
+<!ENTITY agrave CDATA "&#224;" -- latin small letter a with grave
+ = latin small letter a grave,
+ U+00E0 ISOlat1 -->
+<!ENTITY aacute CDATA "&#225;" -- latin small letter a with acute,
+ U+00E1 ISOlat1 -->
+<!ENTITY acirc CDATA "&#226;" -- latin small letter a with circumflex,
+ U+00E2 ISOlat1 -->
+<!ENTITY atilde CDATA "&#227;" -- latin small letter a with tilde,
+ U+00E3 ISOlat1 -->
+<!ENTITY auml CDATA "&#228;" -- latin small letter a with diaeresis,
+ U+00E4 ISOlat1 -->
+<!ENTITY aring CDATA "&#229;" -- latin small letter a with ring above
+ = latin small letter a ring,
+ U+00E5 ISOlat1 -->
+<!ENTITY aelig CDATA "&#230;" -- latin small letter ae
+ = latin small ligature ae, U+00E6 ISOlat1 -->
+<!ENTITY ccedil CDATA "&#231;" -- latin small letter c with cedilla,
+ U+00E7 ISOlat1 -->
+<!ENTITY egrave CDATA "&#232;" -- latin small letter e with grave,
+ U+00E8 ISOlat1 -->
+<!ENTITY eacute CDATA "&#233;" -- latin small letter e with acute,
+ U+00E9 ISOlat1 -->
+<!ENTITY ecirc CDATA "&#234;" -- latin small letter e with circumflex,
+ U+00EA ISOlat1 -->
+<!ENTITY euml CDATA "&#235;" -- latin small letter e with diaeresis,
+ U+00EB ISOlat1 -->
+<!ENTITY igrave CDATA "&#236;" -- latin small letter i with grave,
+ U+00EC ISOlat1 -->
+<!ENTITY iacute CDATA "&#237;" -- latin small letter i with acute,
+ U+00ED ISOlat1 -->
+<!ENTITY icirc CDATA "&#238;" -- latin small letter i with circumflex,
+ U+00EE ISOlat1 -->
+<!ENTITY iuml CDATA "&#239;" -- latin small letter i with diaeresis,
+ U+00EF ISOlat1 -->
+<!ENTITY eth CDATA "&#240;" -- latin small letter eth, U+00F0 ISOlat1 -->
+<!ENTITY ntilde CDATA "&#241;" -- latin small letter n with tilde,
+ U+00F1 ISOlat1 -->
+<!ENTITY ograve CDATA "&#242;" -- latin small letter o with grave,
+ U+00F2 ISOlat1 -->
+<!ENTITY oacute CDATA "&#243;" -- latin small letter o with acute,
+ U+00F3 ISOlat1 -->
+<!ENTITY ocirc CDATA "&#244;" -- latin small letter o with circumflex,
+ U+00F4 ISOlat1 -->
+<!ENTITY otilde CDATA "&#245;" -- latin small letter o with tilde,
+ U+00F5 ISOlat1 -->
+<!ENTITY ouml CDATA "&#246;" -- latin small letter o with diaeresis,
+ U+00F6 ISOlat1 -->
+<!ENTITY divide CDATA "&#247;" -- division sign, U+00F7 ISOnum -->
+<!ENTITY oslash CDATA "&#248;" -- latin small letter o with stroke,
+ = latin small letter o slash,
+ U+00F8 ISOlat1 -->
+<!ENTITY ugrave CDATA "&#249;" -- latin small letter u with grave,
+ U+00F9 ISOlat1 -->
+<!ENTITY uacute CDATA "&#250;" -- latin small letter u with acute,
+ U+00FA ISOlat1 -->
+<!ENTITY ucirc CDATA "&#251;" -- latin small letter u with circumflex,
+ U+00FB ISOlat1 -->
+<!ENTITY uuml CDATA "&#252;" -- latin small letter u with diaeresis,
+ U+00FC ISOlat1 -->
+<!ENTITY yacute CDATA "&#253;" -- latin small letter y with acute,
+ U+00FD ISOlat1 -->
+<!ENTITY thorn CDATA "&#254;" -- latin small letter thorn with,
+ U+00FE ISOlat1 -->
+<!ENTITY yuml CDATA "&#255;" -- latin small letter y with diaeresis,
+ U+00FF ISOlat1 --> \ No newline at end of file
diff --git a/tests/dtds/HTMLspec.ent b/tests/dtds/HTMLspec.ent
new file mode 100644
index 0000000..29011cc
--- /dev/null
+++ b/tests/dtds/HTMLspec.ent
@@ -0,0 +1,77 @@
+<!-- Special characters for HTML -->
+
+<!-- Character entity set. Typical invocation:
+ <!ENTITY % HTMLspecial PUBLIC
+ "-//W3C//ENTITIES Special//EN//HTML">
+ %HTMLspecial; -->
+
+<!-- Portions (C) International Organization for Standardization 1986:
+ Permission to copy in any form is granted for use with
+ conforming SGML systems and applications as defined in
+ ISO 8879, provided this notice is included in all copies.
+-->
+
+<!-- Relevant ISO entity set is given unless names are newly introduced.
+ New names (i.e., not in ISO 8879 list) do not clash with any
+ existing ISO 8879 entity names. ISO 10646 character numbers
+ are given for each character, in hex. CDATA values are decimal
+ conversions of the ISO 10646 values and refer to the document
+ character set. Names are Unicode 2.0 names.
+
+-->
+
+<!-- C0 Controls and Basic Latin -->
+<!ENTITY quot CDATA "&#34;" -- quotation mark = APL quote,
+ U+0022 ISOnum -->
+<!ENTITY amp CDATA "&#38;" -- ampersand, U+0026 ISOnum -->
+<!ENTITY lt CDATA "&#60;" -- less-than sign, U+003C ISOnum -->
+<!ENTITY gt CDATA "&#62;" -- greater-than sign, U+003E ISOnum -->
+
+<!-- Latin Extended-A -->
+<!ENTITY OElig CDATA "&#338;" -- latin capital ligature OE,
+ U+0152 ISOlat2 -->
+<!ENTITY oelig CDATA "&#339;" -- latin small ligature oe, U+0153 ISOlat2 -->
+<!-- ligature is a misnomer, this is a separate character in some languages -->
+<!ENTITY Scaron CDATA "&#352;" -- latin capital letter S with caron,
+ U+0160 ISOlat2 -->
+<!ENTITY scaron CDATA "&#353;" -- latin small letter s with caron,
+ U+0161 ISOlat2 -->
+<!ENTITY Yuml CDATA "&#376;" -- latin capital letter Y with diaeresis,
+ U+0178 ISOlat2 -->
+
+<!-- Spacing Modifier Letters -->
+<!ENTITY circ CDATA "&#710;" -- modifier letter circumflex accent,
+ U+02C6 ISOpub -->
+<!ENTITY tilde CDATA "&#732;" -- small tilde, U+02DC ISOdia -->
+
+<!-- General Punctuation -->
+<!ENTITY ensp CDATA "&#8194;" -- en space, U+2002 ISOpub -->
+<!ENTITY emsp CDATA "&#8195;" -- em space, U+2003 ISOpub -->
+<!ENTITY thinsp CDATA "&#8201;" -- thin space, U+2009 ISOpub -->
+<!ENTITY zwnj CDATA "&#8204;" -- zero width non-joiner,
+ U+200C NEW RFC 2070 -->
+<!ENTITY zwj CDATA "&#8205;" -- zero width joiner, U+200D NEW RFC 2070 -->
+<!ENTITY lrm CDATA "&#8206;" -- left-to-right mark, U+200E NEW RFC 2070 -->
+<!ENTITY rlm CDATA "&#8207;" -- right-to-left mark, U+200F NEW RFC 2070 -->
+<!ENTITY ndash CDATA "&#8211;" -- en dash, U+2013 ISOpub -->
+<!ENTITY mdash CDATA "&#8212;" -- em dash, U+2014 ISOpub -->
+<!ENTITY lsquo CDATA "&#8216;" -- left single quotation mark,
+ U+2018 ISOnum -->
+<!ENTITY rsquo CDATA "&#8217;" -- right single quotation mark,
+ U+2019 ISOnum -->
+<!ENTITY sbquo CDATA "&#8218;" -- single low-9 quotation mark, U+201A NEW -->
+<!ENTITY ldquo CDATA "&#8220;" -- left double quotation mark,
+ U+201C ISOnum -->
+<!ENTITY rdquo CDATA "&#8221;" -- right double quotation mark,
+ U+201D ISOnum -->
+<!ENTITY bdquo CDATA "&#8222;" -- double low-9 quotation mark, U+201E NEW -->
+<!ENTITY dagger CDATA "&#8224;" -- dagger, U+2020 ISOpub -->
+<!ENTITY Dagger CDATA "&#8225;" -- double dagger, U+2021 ISOpub -->
+<!ENTITY permil CDATA "&#8240;" -- per mille sign, U+2030 ISOtech -->
+<!ENTITY lsaquo CDATA "&#8249;" -- single left-pointing angle quotation mark,
+ U+2039 ISO proposed -->
+<!-- lsaquo is proposed but not yet ISO standardized -->
+<!ENTITY rsaquo CDATA "&#8250;" -- single right-pointing angle quotation mark,
+ U+203A ISO proposed -->
+<!-- rsaquo is proposed but not yet ISO standardized -->
+<!ENTITY euro CDATA "&#8364;" -- euro sign, U+20AC NEW --> \ No newline at end of file
diff --git a/tests/dtds/HTMLsym.ent b/tests/dtds/HTMLsym.ent
new file mode 100644
index 0000000..2a6250b
--- /dev/null
+++ b/tests/dtds/HTMLsym.ent
@@ -0,0 +1,241 @@
+<!-- Mathematical, Greek and Symbolic characters for HTML -->
+
+<!-- Character entity set. Typical invocation:
+ <!ENTITY % HTMLsymbol PUBLIC
+ "-//W3C//ENTITIES Symbolic//EN//HTML">
+ %HTMLsymbol; -->
+
+<!-- Portions (C) International Organization for Standardization 1986:
+ Permission to copy in any form is granted for use with
+ conforming SGML systems and applications as defined in
+ ISO 8879, provided this notice is included in all copies.
+-->
+
+<!-- Relevant ISO entity set is given unless names are newly introduced.
+ New names (i.e., not in ISO 8879 list) do not clash with any
+ existing ISO 8879 entity names. ISO 10646 character numbers
+ are given for each character, in hex. CDATA values are decimal
+ conversions of the ISO 10646 values and refer to the document
+ character set. Names are Unicode 2.0 names.
+
+-->
+
+<!-- Latin Extended-B -->
+<!ENTITY fnof CDATA "&#402;" -- latin small f with hook = function
+ = florin, U+0192 ISOtech -->
+
+<!-- Greek -->
+<!ENTITY Alpha CDATA "&#913;" -- greek capital letter alpha, U+0391 -->
+<!ENTITY Beta CDATA "&#914;" -- greek capital letter beta, U+0392 -->
+<!ENTITY Gamma CDATA "&#915;" -- greek capital letter gamma,
+ U+0393 ISOgrk3 -->
+<!ENTITY Delta CDATA "&#916;" -- greek capital letter delta,
+ U+0394 ISOgrk3 -->
+<!ENTITY Epsilon CDATA "&#917;" -- greek capital letter epsilon, U+0395 -->
+<!ENTITY Zeta CDATA "&#918;" -- greek capital letter zeta, U+0396 -->
+<!ENTITY Eta CDATA "&#919;" -- greek capital letter eta, U+0397 -->
+<!ENTITY Theta CDATA "&#920;" -- greek capital letter theta,
+ U+0398 ISOgrk3 -->
+<!ENTITY Iota CDATA "&#921;" -- greek capital letter iota, U+0399 -->
+<!ENTITY Kappa CDATA "&#922;" -- greek capital letter kappa, U+039A -->
+<!ENTITY Lambda CDATA "&#923;" -- greek capital letter lambda,
+ U+039B ISOgrk3 -->
+<!ENTITY Mu CDATA "&#924;" -- greek capital letter mu, U+039C -->
+<!ENTITY Nu CDATA "&#925;" -- greek capital letter nu, U+039D -->
+<!ENTITY Xi CDATA "&#926;" -- greek capital letter xi, U+039E ISOgrk3 -->
+<!ENTITY Omicron CDATA "&#927;" -- greek capital letter omicron, U+039F -->
+<!ENTITY Pi CDATA "&#928;" -- greek capital letter pi, U+03A0 ISOgrk3 -->
+<!ENTITY Rho CDATA "&#929;" -- greek capital letter rho, U+03A1 -->
+<!-- there is no Sigmaf, and no U+03A2 character either -->
+<!ENTITY Sigma CDATA "&#931;" -- greek capital letter sigma,
+ U+03A3 ISOgrk3 -->
+<!ENTITY Tau CDATA "&#932;" -- greek capital letter tau, U+03A4 -->
+<!ENTITY Upsilon CDATA "&#933;" -- greek capital letter upsilon,
+ U+03A5 ISOgrk3 -->
+<!ENTITY Phi CDATA "&#934;" -- greek capital letter phi,
+ U+03A6 ISOgrk3 -->
+<!ENTITY Chi CDATA "&#935;" -- greek capital letter chi, U+03A7 -->
+<!ENTITY Psi CDATA "&#936;" -- greek capital letter psi,
+ U+03A8 ISOgrk3 -->
+<!ENTITY Omega CDATA "&#937;" -- greek capital letter omega,
+ U+03A9 ISOgrk3 -->
+
+<!ENTITY alpha CDATA "&#945;" -- greek small letter alpha,
+ U+03B1 ISOgrk3 -->
+<!ENTITY beta CDATA "&#946;" -- greek small letter beta, U+03B2 ISOgrk3 -->
+<!ENTITY gamma CDATA "&#947;" -- greek small letter gamma,
+ U+03B3 ISOgrk3 -->
+<!ENTITY delta CDATA "&#948;" -- greek small letter delta,
+ U+03B4 ISOgrk3 -->
+<!ENTITY epsilon CDATA "&#949;" -- greek small letter epsilon,
+ U+03B5 ISOgrk3 -->
+<!ENTITY zeta CDATA "&#950;" -- greek small letter zeta, U+03B6 ISOgrk3 -->
+<!ENTITY eta CDATA "&#951;" -- greek small letter eta, U+03B7 ISOgrk3 -->
+<!ENTITY theta CDATA "&#952;" -- greek small letter theta,
+ U+03B8 ISOgrk3 -->
+<!ENTITY iota CDATA "&#953;" -- greek small letter iota, U+03B9 ISOgrk3 -->
+<!ENTITY kappa CDATA "&#954;" -- greek small letter kappa,
+ U+03BA ISOgrk3 -->
+<!ENTITY lambda CDATA "&#955;" -- greek small letter lambda,
+ U+03BB ISOgrk3 -->
+<!ENTITY mu CDATA "&#956;" -- greek small letter mu, U+03BC ISOgrk3 -->
+<!ENTITY nu CDATA "&#957;" -- greek small letter nu, U+03BD ISOgrk3 -->
+<!ENTITY xi CDATA "&#958;" -- greek small letter xi, U+03BE ISOgrk3 -->
+<!ENTITY omicron CDATA "&#959;" -- greek small letter omicron, U+03BF NEW -->
+<!ENTITY pi CDATA "&#960;" -- greek small letter pi, U+03C0 ISOgrk3 -->
+<!ENTITY rho CDATA "&#961;" -- greek small letter rho, U+03C1 ISOgrk3 -->
+<!ENTITY sigmaf CDATA "&#962;" -- greek small letter final sigma,
+ U+03C2 ISOgrk3 -->
+<!ENTITY sigma CDATA "&#963;" -- greek small letter sigma,
+ U+03C3 ISOgrk3 -->
+<!ENTITY tau CDATA "&#964;" -- greek small letter tau, U+03C4 ISOgrk3 -->
+<!ENTITY upsilon CDATA "&#965;" -- greek small letter upsilon,
+ U+03C5 ISOgrk3 -->
+<!ENTITY phi CDATA "&#966;" -- greek small letter phi, U+03C6 ISOgrk3 -->
+<!ENTITY chi CDATA "&#967;" -- greek small letter chi, U+03C7 ISOgrk3 -->
+<!ENTITY psi CDATA "&#968;" -- greek small letter psi, U+03C8 ISOgrk3 -->
+<!ENTITY omega CDATA "&#969;" -- greek small letter omega,
+ U+03C9 ISOgrk3 -->
+<!ENTITY thetasym CDATA "&#977;" -- greek small letter theta symbol,
+ U+03D1 NEW -->
+<!ENTITY upsih CDATA "&#978;" -- greek upsilon with hook symbol,
+ U+03D2 NEW -->
+<!ENTITY piv CDATA "&#982;" -- greek pi symbol, U+03D6 ISOgrk3 -->
+
+<!-- General Punctuation -->
+<!ENTITY bull CDATA "&#8226;" -- bullet = black small circle,
+ U+2022 ISOpub -->
+<!-- bullet is NOT the same as bullet operator, U+2219 -->
+<!ENTITY hellip CDATA "&#8230;" -- horizontal ellipsis = three dot leader,
+ U+2026 ISOpub -->
+<!ENTITY prime CDATA "&#8242;" -- prime = minutes = feet, U+2032 ISOtech -->
+<!ENTITY Prime CDATA "&#8243;" -- double prime = seconds = inches,
+ U+2033 ISOtech -->
+<!ENTITY oline CDATA "&#8254;" -- overline = spacing overscore,
+ U+203E NEW -->
+<!ENTITY frasl CDATA "&#8260;" -- fraction slash, U+2044 NEW -->
+
+<!-- Letterlike Symbols -->
+<!ENTITY weierp CDATA "&#8472;" -- script capital P = power set
+ = Weierstrass p, U+2118 ISOamso -->
+<!ENTITY image CDATA "&#8465;" -- blackletter capital I = imaginary part,
+ U+2111 ISOamso -->
+<!ENTITY real CDATA "&#8476;" -- blackletter capital R = real part symbol,
+ U+211C ISOamso -->
+<!ENTITY trade CDATA "&#8482;" -- trade mark sign, U+2122 ISOnum -->
+<!ENTITY alefsym CDATA "&#8501;" -- alef symbol = first transfinite cardinal,
+ U+2135 NEW -->
+<!-- alef symbol is NOT the same as hebrew letter alef,
+ U+05D0 although the same glyph could be used to depict both characters -->
+
+<!-- Arrows -->
+<!ENTITY larr CDATA "&#8592;" -- leftwards arrow, U+2190 ISOnum -->
+<!ENTITY uarr CDATA "&#8593;" -- upwards arrow, U+2191 ISOnum-->
+<!ENTITY rarr CDATA "&#8594;" -- rightwards arrow, U+2192 ISOnum -->
+<!ENTITY darr CDATA "&#8595;" -- downwards arrow, U+2193 ISOnum -->
+<!ENTITY harr CDATA "&#8596;" -- left right arrow, U+2194 ISOamsa -->
+<!ENTITY crarr CDATA "&#8629;" -- downwards arrow with corner leftwards
+ = carriage return, U+21B5 NEW -->
+<!ENTITY lArr CDATA "&#8656;" -- leftwards double arrow, U+21D0 ISOtech -->
+<!-- Unicode does not say that lArr is the same as the 'is implied by' arrow
+ but also does not have any other character for that function. So ? lArr can
+ be used for 'is implied by' as ISOtech suggests -->
+<!ENTITY uArr CDATA "&#8657;" -- upwards double arrow, U+21D1 ISOamsa -->
+<!ENTITY rArr CDATA "&#8658;" -- rightwards double arrow,
+ U+21D2 ISOtech -->
+<!-- Unicode does not say this is the 'implies' character but does not have
+ another character with this function so ?
+ rArr can be used for 'implies' as ISOtech suggests -->
+<!ENTITY dArr CDATA "&#8659;" -- downwards double arrow, U+21D3 ISOamsa -->
+<!ENTITY hArr CDATA "&#8660;" -- left right double arrow,
+ U+21D4 ISOamsa -->
+
+<!-- Mathematical Operators -->
+<!ENTITY forall CDATA "&#8704;" -- for all, U+2200 ISOtech -->
+<!ENTITY part CDATA "&#8706;" -- partial differential, U+2202 ISOtech -->
+<!ENTITY exist CDATA "&#8707;" -- there exists, U+2203 ISOtech -->
+<!ENTITY empty CDATA "&#8709;" -- empty set = null set = diameter,
+ U+2205 ISOamso -->
+<!ENTITY nabla CDATA "&#8711;" -- nabla = backward difference,
+ U+2207 ISOtech -->
+<!ENTITY isin CDATA "&#8712;" -- element of, U+2208 ISOtech -->
+<!ENTITY notin CDATA "&#8713;" -- not an element of, U+2209 ISOtech -->
+<!ENTITY ni CDATA "&#8715;" -- contains as member, U+220B ISOtech -->
+<!-- should there be a more memorable name than 'ni'? -->
+<!ENTITY prod CDATA "&#8719;" -- n-ary product = product sign,
+ U+220F ISOamsb -->
+<!-- prod is NOT the same character as U+03A0 'greek capital letter pi' though
+ the same glyph might be used for both -->
+<!ENTITY sum CDATA "&#8721;" -- n-ary sumation, U+2211 ISOamsb -->
+<!-- sum is NOT the same character as U+03A3 'greek capital letter sigma'
+ though the same glyph might be used for both -->
+<!ENTITY minus CDATA "&#8722;" -- minus sign, U+2212 ISOtech -->
+<!ENTITY lowast CDATA "&#8727;" -- asterisk operator, U+2217 ISOtech -->
+<!ENTITY radic CDATA "&#8730;" -- square root = radical sign,
+ U+221A ISOtech -->
+<!ENTITY prop CDATA "&#8733;" -- proportional to, U+221D ISOtech -->
+<!ENTITY infin CDATA "&#8734;" -- infinity, U+221E ISOtech -->
+<!ENTITY ang CDATA "&#8736;" -- angle, U+2220 ISOamso -->
+<!ENTITY and CDATA "&#8743;" -- logical and = wedge, U+2227 ISOtech -->
+<!ENTITY or CDATA "&#8744;" -- logical or = vee, U+2228 ISOtech -->
+<!ENTITY cap CDATA "&#8745;" -- intersection = cap, U+2229 ISOtech -->
+<!ENTITY cup CDATA "&#8746;" -- union = cup, U+222A ISOtech -->
+<!ENTITY int CDATA "&#8747;" -- integral, U+222B ISOtech -->
+<!ENTITY there4 CDATA "&#8756;" -- therefore, U+2234 ISOtech -->
+<!ENTITY sim CDATA "&#8764;" -- tilde operator = varies with = similar to,
+ U+223C ISOtech -->
+<!-- tilde operator is NOT the same character as the tilde, U+007E,
+ although the same glyph might be used to represent both -->
+<!ENTITY cong CDATA "&#8773;" -- approximately equal to, U+2245 ISOtech -->
+<!ENTITY asymp CDATA "&#8776;" -- almost equal to = asymptotic to,
+ U+2248 ISOamsr -->
+<!ENTITY ne CDATA "&#8800;" -- not equal to, U+2260 ISOtech -->
+<!ENTITY equiv CDATA "&#8801;" -- identical to, U+2261 ISOtech -->
+<!ENTITY le CDATA "&#8804;" -- less-than or equal to, U+2264 ISOtech -->
+<!ENTITY ge CDATA "&#8805;" -- greater-than or equal to,
+ U+2265 ISOtech -->
+<!ENTITY sub CDATA "&#8834;" -- subset of, U+2282 ISOtech -->
+<!ENTITY sup CDATA "&#8835;" -- superset of, U+2283 ISOtech -->
+<!-- note that nsup, 'not a superset of, U+2283' is not covered by the Symbol
+ font encoding and is not included. Should it be, for symmetry?
+ It is in ISOamsn -->
+<!ENTITY nsub CDATA "&#8836;" -- not a subset of, U+2284 ISOamsn -->
+<!ENTITY sube CDATA "&#8838;" -- subset of or equal to, U+2286 ISOtech -->
+<!ENTITY supe CDATA "&#8839;" -- superset of or equal to,
+ U+2287 ISOtech -->
+<!ENTITY oplus CDATA "&#8853;" -- circled plus = direct sum,
+ U+2295 ISOamsb -->
+<!ENTITY otimes CDATA "&#8855;" -- circled times = vector product,
+ U+2297 ISOamsb -->
+<!ENTITY perp CDATA "&#8869;" -- up tack = orthogonal to = perpendicular,
+ U+22A5 ISOtech -->
+<!ENTITY sdot CDATA "&#8901;" -- dot operator, U+22C5 ISOamsb -->
+<!-- dot operator is NOT the same character as U+00B7 middle dot -->
+
+<!-- Miscellaneous Technical -->
+<!ENTITY lceil CDATA "&#8968;" -- left ceiling = apl upstile,
+ U+2308 ISOamsc -->
+<!ENTITY rceil CDATA "&#8969;" -- right ceiling, U+2309 ISOamsc -->
+<!ENTITY lfloor CDATA "&#8970;" -- left floor = apl downstile,
+ U+230A ISOamsc -->
+<!ENTITY rfloor CDATA "&#8971;" -- right floor, U+230B ISOamsc -->
+<!ENTITY lang CDATA "&#9001;" -- left-pointing angle bracket = bra,
+ U+2329 ISOtech -->
+<!-- lang is NOT the same character as U+003C 'less than'
+ or U+2039 'single left-pointing angle quotation mark' -->
+<!ENTITY rang CDATA "&#9002;" -- right-pointing angle bracket = ket,
+ U+232A ISOtech -->
+<!-- rang is NOT the same character as U+003E 'greater than'
+ or U+203A 'single right-pointing angle quotation mark' -->
+
+<!-- Geometric Shapes -->
+<!ENTITY loz CDATA "&#9674;" -- lozenge, U+25CA ISOpub -->
+
+<!-- Miscellaneous Symbols -->
+<!ENTITY spades CDATA "&#9824;" -- black spade suit, U+2660 ISOpub -->
+<!-- black here seems to mean filled as opposed to hollow -->
+<!ENTITY clubs CDATA "&#9827;" -- black club suit = shamrock,
+ U+2663 ISOpub -->
+<!ENTITY hearts CDATA "&#9829;" -- black heart suit = valentine,
+ U+2665 ISOpub -->
+<!ENTITY diams CDATA "&#9830;" -- black diamond suit, U+2666 ISOpub --> \ No newline at end of file
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..e0d2e2e
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-1"><span class="linenos">1</span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos">3</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..4ecfa2d
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-1"><span class="linenos">1</span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos">3</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..bfa915c
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos">1</span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos">3</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..9ad5369
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos">1</span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos">3</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..09a39e8
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-1"><span class="linenos">1</span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos special">3</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..be1416b
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-1"><span class="linenos">1</span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos special">3</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..eb0e032
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos">1</span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos special">3</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..c984c90
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos">1</span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos special">3</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..1d4de97
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos"> 9</span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..8b34749
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos"> 9</span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..0335a40
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos"> 9</span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..dd9874a
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos"> 9</span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..1821406
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos special"> 9</span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..b403949
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos special"> 9</span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..ec4a0da
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos special"> 9</span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..dd74c64
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos special"> 9</span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..41aa7be
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-1"><span class="linenos"> </span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos"> </span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..27865cc
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-1"><span class="linenos"> </span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos"> </span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..7fa5d08
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> </span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos"> </span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..b68a33e
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> </span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos"> </span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..22d07f1
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-1"><span class="linenos"> </span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos special"> </span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..4cfdd4c
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-1"><span class="linenos"> </span></a><span class="c1"># a</span>
+<a href="#-2"><span class="linenos">2</span></a><span class="c1"># b</span>
+<a href="#-3"><span class="linenos special"> </span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..0e977e4
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> </span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos special"> </span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..6345020
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> </span><span class="c1"># a</span>
+<span class="linenos">2</span><span class="c1"># b</span>
+<span class="linenos special"> </span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..50b491a
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos"> </span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..561d47f
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos"> </span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..fd7c0f0
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos"> </span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..12276e8
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos"> </span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..38662f9
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos special"> </span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..a07dfcc
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><a href="#-8"><span class="linenos"> 8</span></a><span class="c1"># a</span>
+<a href="#-9"><span class="linenos special"> </span></a><span class="c1"># b</span>
+<a href="#-10"><span class="linenos">10</span></a><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..56bca93
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><span class="filename">testfilename</span><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos special"> </span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..71548a1
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight"><pre><span></span><span class="linenos"> 8</span><span class="c1"># a</span>
+<span class="linenos special"> </span><span class="c1"># b</span>
+<span class="linenos">10</span><span class="c1"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..d9eeca5
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..0913e4f
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..31525d4
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..e1722cf
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..ee18c21
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..628a1df
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..633c3d5
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..2c705ad
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..9c91cf4
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..d39cb2c
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..d3e4818
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..1233674
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..ef26126
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..8a64149
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..3f08277
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..f5ac0bc
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..3153304
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..47c4a13
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..28ab99c
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..eb6e847
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..ec9d56f
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..3121cd0
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-1"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-2"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-3"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..c4c4c57
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..f003b15
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..26a2dd4
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..bb1be91
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..c8c440d
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..9af5753
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..76a4047
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..aeb6b10
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><a href="#-8"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span></a><span style="color: #3D7B7B; font-style: italic"># a</span>
+<a href="#-9"><span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></a><span style="color: #3D7B7B; font-style: italic"># b</span>
+<a href="#-10"><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></a><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..cb5b02b
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><span class="filename">testfilename</span><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..5e472a1
--- /dev/null
+++ b/tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,4 @@
+<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..861165c
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-1">1</a></span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="normal"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..af31392
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-1">1</a></span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="normal"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..a27ddba
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal">1</span>
+<span class="normal">2</span>
+<span class="normal">3</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..b3044e7
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal">1</span>
+<span class="normal">2</span>
+<span class="normal">3</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..ee74013
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-1">1</a></span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="special"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..a4a3f8e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-1">1</a></span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="special"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..f910782
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal">1</span>
+<span class="normal">2</span>
+<span class="special">3</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..e79e436
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal">1</span>
+<span class="normal">2</span>
+<span class="special">3</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..1a396a7
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="normal"><a href="#-9"> 9</a></span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..04f76cb
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="normal"><a href="#-9"> 9</a></span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..99675f0
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="normal"> 9</span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..1d69eaf
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="normal"> 9</span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..9a47c60
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="special"><a href="#-9"> 9</a></span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..03285e9
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="special"><a href="#-9"> 9</a></span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..0205691
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="special"> 9</span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..b94db01
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="special"> 9</span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..bd13be4
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="normal"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..fcacc18
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="normal"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..4a418e6
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal">2</span>
+<span class="normal"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..7128962
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal">2</span>
+<span class="normal"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..60cab0e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="special"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..848e666
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal"><a href="#-2">2</a></span>
+<span class="special"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..a690bd0
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal">2</span>
+<span class="special"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..31ceb56
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> </span>
+<span class="normal">2</span>
+<span class="special"> </span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..3a750e9
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="normal"> </span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..6837563
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="normal"> </span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..bc0a078
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="normal"> </span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..a8125e8
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="normal"> </span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..36a8e5f
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="special"> </span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..200fffe
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"><a href="#-8"> 8</a></span>
+<span class="special"> </span>
+<span class="normal"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..f9efd12
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="special"> </span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..2d9d86e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span class="normal"> 8</span>
+<span class="special"> </span>
+<span class="normal">10</span></pre></div></td><td class="code"><div><pre><span></span><span class="c1"># a</span>
+<span class="c1"># b</span>
+<span class="c1"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..1cdc6bb
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..6bad0df
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..ffd88d4
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..62c5a6e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">3</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..e719f8e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..e07cebe
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..fbb6cde
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..ce4bfa8
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..3402b4c
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..2eb3396
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..51b2dba
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..91f66e1
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..7e5aadf
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..b10fdca
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..8067f7e
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..f3fde8b
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_filename.html
new file mode 100644
index 0000000..8be13f0
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..f72b326
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_filename.html
new file mode 100644
index 0000000..512db3b
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..26d6723
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_filename.html
new file mode 100644
index 0000000..0270512
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..d6f4d3c
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_filename.html
new file mode 100644
index 0000000..1f6dc86
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..c606d01
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_filename.html
new file mode 100644
index 0000000..b90f2b0
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_nofilename.html
new file mode 100644
index 0000000..926dd8c
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_filename.html
new file mode 100644
index 0000000..1623295
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_nofilename.html
new file mode 100644
index 0000000..0252dce
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_filename.html
new file mode 100644
index 0000000..791f640
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_nofilename.html
new file mode 100644
index 0000000..bbc20f6
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_filename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_filename.html
new file mode 100644
index 0000000..5c04be3
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_filename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><th class="filename" colspan="2"><span class="filename">testfilename</span></th></tr><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_nofilename.html b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_nofilename.html
new file mode 100644
index 0000000..fc75b66
--- /dev/null
+++ b/tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor_nofilename.html
@@ -0,0 +1,6 @@
+<div class="highlight" style="background: #f8f8f8"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px;">10</span></pre></div></td><td class="code"><div><pre style="line-height: 125%;"><span></span><span style="color: #3D7B7B; font-style: italic"># a</span>
+<span style="color: #3D7B7B; font-style: italic"># b</span>
+<span style="color: #3D7B7B; font-style: italic"># c</span>
+</pre></div></td></tr></table></div>
diff --git a/tests/snippets/apacheconf/test_directive_no_args.txt b/tests/snippets/apacheconf/test_directive_no_args.txt
new file mode 100644
index 0000000..a2f3354
--- /dev/null
+++ b/tests/snippets/apacheconf/test_directive_no_args.txt
@@ -0,0 +1,12 @@
+---input---
+Example
+ServerName localhost
+
+---tokens---
+'Example' Name.Builtin
+'\n' Text.Whitespace
+
+'ServerName' Name.Builtin
+' ' Text.Whitespace
+'localhost' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_fix_lock_absolute_path.txt b/tests/snippets/apacheconf/test_fix_lock_absolute_path.txt
new file mode 100644
index 0000000..a274834
--- /dev/null
+++ b/tests/snippets/apacheconf/test_fix_lock_absolute_path.txt
@@ -0,0 +1,8 @@
+---input---
+LockFile /var/lock/apache2/accept.lock
+
+---tokens---
+'LockFile' Name.Builtin
+' ' Text.Whitespace
+'/var/lock/apache2/accept.lock' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_include_globs.txt b/tests/snippets/apacheconf/test_include_globs.txt
new file mode 100644
index 0000000..495242e
--- /dev/null
+++ b/tests/snippets/apacheconf/test_include_globs.txt
@@ -0,0 +1,8 @@
+---input---
+Include /etc/httpd/conf.d/*.conf
+
+---tokens---
+'Include' Name.Builtin
+' ' Text.Whitespace
+'/etc/httpd/conf.d/*.conf' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_malformed_scoped_directive_closing_tag.txt b/tests/snippets/apacheconf/test_malformed_scoped_directive_closing_tag.txt
new file mode 100644
index 0000000..c65628b
--- /dev/null
+++ b/tests/snippets/apacheconf/test_malformed_scoped_directive_closing_tag.txt
@@ -0,0 +1,19 @@
+---input---
+<VirtualHost "test">
+</VirtualHost
+>
+
+---tokens---
+'<VirtualHost' Name.Tag
+' ' Text.Whitespace
+'"test"' Literal.String
+'>' Name.Tag
+'\n' Text.Whitespace
+
+'<' Error
+'/' Error
+'VirtualHost' Name.Builtin
+'\n' Text.Whitespace
+
+'>' Error
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_multi_include_globs.txt b/tests/snippets/apacheconf/test_multi_include_globs.txt
new file mode 100644
index 0000000..abf5bb0
--- /dev/null
+++ b/tests/snippets/apacheconf/test_multi_include_globs.txt
@@ -0,0 +1,8 @@
+---input---
+Include /etc/httpd/conf.d/*/*.conf
+
+---tokens---
+'Include' Name.Builtin
+' ' Text.Whitespace
+'/etc/httpd/conf.d/*/*.conf' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_multi_include_globs_root.txt b/tests/snippets/apacheconf/test_multi_include_globs_root.txt
new file mode 100644
index 0000000..5df63e4
--- /dev/null
+++ b/tests/snippets/apacheconf/test_multi_include_globs_root.txt
@@ -0,0 +1,8 @@
+---input---
+Include /*conf/*.conf
+
+---tokens---
+'Include' Name.Builtin
+' ' Text.Whitespace
+'/*conf/*.conf' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_multiline_argument.txt b/tests/snippets/apacheconf/test_multiline_argument.txt
new file mode 100644
index 0000000..4163970
--- /dev/null
+++ b/tests/snippets/apacheconf/test_multiline_argument.txt
@@ -0,0 +1,20 @@
+---input---
+SecAction \
+ "id:'900001', \
+ phase:1, \
+ t:none, \
+ setvar:tx.critical_anomaly_score=5, \
+ setvar:tx.error_anomaly_score=4, \
+ setvar:tx.warning_anomaly_score=3, \
+ setvar:tx.notice_anomaly_score=2, \
+ nolog, \
+ pass"
+
+---tokens---
+'SecAction' Name.Builtin
+' ' Text.Whitespace
+'\\\n' Text
+
+' ' Text.Whitespace
+'"id:\'900001\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.critical_anomaly_score=5, \\\n setvar:tx.error_anomaly_score=4, \\\n setvar:tx.warning_anomaly_score=3, \\\n setvar:tx.notice_anomaly_score=2, \\\n nolog, \\\n pass"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_multiline_comment.txt b/tests/snippets/apacheconf/test_multiline_comment.txt
new file mode 100644
index 0000000..cdcfe01
--- /dev/null
+++ b/tests/snippets/apacheconf/test_multiline_comment.txt
@@ -0,0 +1,12 @@
+---input---
+#SecAction \
+ "id:'900004', \
+ phase:1, \
+ t:none, \
+ setvar:tx.anomaly_score_blocking=on, \
+ nolog, \
+ pass"
+
+---tokens---
+'#SecAction \\\n "id:\'900004\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.anomaly_score_blocking=on, \\\n nolog, \\\n pass"' Comment
+'\n' Text.Whitespace
diff --git a/tests/snippets/apacheconf/test_normal_scoped_directive.txt b/tests/snippets/apacheconf/test_normal_scoped_directive.txt
new file mode 100644
index 0000000..f00496d
--- /dev/null
+++ b/tests/snippets/apacheconf/test_normal_scoped_directive.txt
@@ -0,0 +1,14 @@
+---input---
+<VirtualHost "test">
+</VirtualHost>
+
+---tokens---
+'<VirtualHost' Name.Tag
+' ' Text.Whitespace
+'"test"' Literal.String
+'>' Name.Tag
+'\n' Text.Whitespace
+
+'</VirtualHost' Name.Tag
+'>' Name.Tag
+'\n' Text.Whitespace
diff --git a/tests/snippets/apl/test_leading_underscore.txt b/tests/snippets/apl/test_leading_underscore.txt
new file mode 100644
index 0000000..c11258d
--- /dev/null
+++ b/tests/snippets/apl/test_leading_underscore.txt
@@ -0,0 +1,26 @@
+---input---
+_op_←{_←⍺ ⍵⋄(⍺⍺ ⍺)+⍵⍵ ⍵}
+
+---tokens---
+'_op_' Name.Variable
+'←' Keyword.Declaration
+'{' Keyword.Type
+'_' Name.Variable
+'←' Keyword.Declaration
+'⍺' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'⍵' Name.Builtin.Pseudo
+'⋄' Punctuation
+'(' Punctuation
+'⍺' Name.Builtin.Pseudo
+'⍺' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'⍺' Name.Builtin.Pseudo
+')' Punctuation
+'+' Operator
+'⍵' Name.Builtin.Pseudo
+'⍵' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'⍵' Name.Builtin.Pseudo
+'}' Keyword.Type
+'\n' Text.Whitespace
diff --git a/tests/snippets/asm/test_cpuid.txt b/tests/snippets/asm/test_cpuid.txt
new file mode 100644
index 0000000..8256618
--- /dev/null
+++ b/tests/snippets/asm/test_cpuid.txt
@@ -0,0 +1,9 @@
+# CPU is a valid directive, and we don't want to parse this as
+# cpu id, but as a single token. See bug #1517
+
+---input---
+cpuid
+
+---tokens---
+'cpuid' Name.Function
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_basic_bst.txt b/tests/snippets/bibtex/test_basic_bst.txt
new file mode 100644
index 0000000..c519ae0
--- /dev/null
+++ b/tests/snippets/bibtex/test_basic_bst.txt
@@ -0,0 +1,54 @@
+---input---
+% BibTeX standard bibliography style `plain'
+
+INTEGERS { output.state before.all }
+
+FUNCTION {sort.format.title}
+{ 't :=
+"A " #2
+ "An " #3
+ "The " #4 t chop.word
+ chop.word
+chop.word
+sortify
+#1 global.max$ substring$
+}
+
+ITERATE {call.type$}
+
+---tokens---
+"% BibTeX standard bibliography style `plain'" Comment
+'\n\n' Text.Whitespace
+
+'INTEGERS { output.state before.all }' Comment
+'\n\n' Text.Whitespace
+
+'FUNCTION {sort.format.title}' Comment
+'\n' Text.Whitespace
+
+"{ 't :=" Comment
+'\n' Text.Whitespace
+
+'"A " #2' Comment
+'\n ' Text.Whitespace
+'"An " #3' Comment
+'\n ' Text.Whitespace
+'"The " #4 t chop.word' Comment
+'\n ' Text.Whitespace
+'chop.word' Comment
+'\n' Text.Whitespace
+
+'chop.word' Comment
+'\n' Text.Whitespace
+
+'sortify' Comment
+'\n' Text.Whitespace
+
+'#1 global.max$ substring$' Comment
+'\n' Text.Whitespace
+
+'}' Comment
+'\n\n' Text.Whitespace
+
+'ITERATE {call.type$}' Comment
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_comment.txt b/tests/snippets/bibtex/test_comment.txt
new file mode 100644
index 0000000..6e98d64
--- /dev/null
+++ b/tests/snippets/bibtex/test_comment.txt
@@ -0,0 +1,7 @@
+---input---
+@COMMENT{test}
+
+---tokens---
+'@COMMENT' Comment
+'{test}' Comment
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_entry.txt b/tests/snippets/bibtex/test_entry.txt
new file mode 100644
index 0000000..c712a2f
--- /dev/null
+++ b/tests/snippets/bibtex/test_entry.txt
@@ -0,0 +1,63 @@
+---input---
+This is a comment.
+
+@ARTICLE{ruckenstein-diffusion,
+ author = "Liu, Hongquin" # and # "Ruckenstein, Eli",
+ year = 1997,
+ month = JAN,
+ pages = "888-895"
+}
+
+---tokens---
+'This is a comment.' Comment
+'\n\n' Text.Whitespace
+
+'@ARTICLE' Name.Class
+'{' Punctuation
+'ruckenstein-diffusion' Name.Label
+',' Punctuation
+'\n ' Text.Whitespace
+'author' Name.Attribute
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'"' Literal.String
+'Liu, Hongquin' Literal.String
+'"' Literal.String
+' ' Text.Whitespace
+'#' Punctuation
+' ' Text.Whitespace
+'and' Name.Variable
+' ' Text.Whitespace
+'#' Punctuation
+' ' Text.Whitespace
+'"' Literal.String
+'Ruckenstein, Eli' Literal.String
+'"' Literal.String
+',' Punctuation
+'\n ' Text.Whitespace
+'year' Name.Attribute
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'1997' Literal.Number
+',' Punctuation
+'\n ' Text.Whitespace
+'month' Name.Attribute
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'JAN' Name.Variable
+',' Punctuation
+'\n ' Text.Whitespace
+'pages' Name.Attribute
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'"' Literal.String
+'888-895' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_mismatched_brace.txt b/tests/snippets/bibtex/test_mismatched_brace.txt
new file mode 100644
index 0000000..6c1deda
--- /dev/null
+++ b/tests/snippets/bibtex/test_mismatched_brace.txt
@@ -0,0 +1,10 @@
+---input---
+@PREAMBLE(""}
+
+---tokens---
+'@PREAMBLE' Name.Class
+'(' Punctuation
+'"' Literal.String
+'"' Literal.String
+'}' Error
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_missing_body.txt b/tests/snippets/bibtex/test_missing_body.txt
new file mode 100644
index 0000000..24dad98
--- /dev/null
+++ b/tests/snippets/bibtex/test_missing_body.txt
@@ -0,0 +1,10 @@
+---input---
+@ARTICLE xxx
+
+---tokens---
+'@ARTICLE' Name.Class
+' ' Text.Whitespace
+'x' Error
+'x' Error
+'x' Error
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_preamble.txt b/tests/snippets/bibtex/test_preamble.txt
new file mode 100644
index 0000000..9625f96
--- /dev/null
+++ b/tests/snippets/bibtex/test_preamble.txt
@@ -0,0 +1,11 @@
+---input---
+@PREAMBLE{"% some LaTeX code here"}
+
+---tokens---
+'@PREAMBLE' Name.Class
+'{' Punctuation
+'"' Literal.String
+'% some LaTeX code here' Literal.String
+'"' Literal.String
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/bibtex/test_string.txt b/tests/snippets/bibtex/test_string.txt
new file mode 100644
index 0000000..f76a31b
--- /dev/null
+++ b/tests/snippets/bibtex/test_string.txt
@@ -0,0 +1,15 @@
+---input---
+@STRING(SCI = "Science")
+
+---tokens---
+'@STRING' Name.Class
+'(' Punctuation
+'SCI' Name.Attribute
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'"' Literal.String
+'Science' Literal.String
+'"' Literal.String
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_comment_end.txt b/tests/snippets/c/test_comment_end.txt
new file mode 100644
index 0000000..2a21d65
--- /dev/null
+++ b/tests/snippets/c/test_comment_end.txt
@@ -0,0 +1,31 @@
+In the past the "*/" was marked as an error to "helpfully"
+indicate a wrong comment end.
+
+---input---
+int m21=((result_0*0+result_1*/*0<-----*/1)%mod+mod)%mod;
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'m21' Name
+'=' Operator
+'(' Punctuation
+'(' Punctuation
+'result_0' Name
+'*' Operator
+'0' Literal.Number.Integer
+'+' Operator
+'result_1' Name
+'*' Operator
+'/*0<-----*/' Comment.Multiline
+'1' Literal.Number.Integer
+')' Punctuation
+'%' Operator
+'mod' Name
+'+' Operator
+'mod' Name
+')' Punctuation
+'%' Operator
+'mod' Name
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_function_comments.txt b/tests/snippets/c/test_function_comments.txt
new file mode 100644
index 0000000..e8d8a69
--- /dev/null
+++ b/tests/snippets/c/test_function_comments.txt
@@ -0,0 +1,409 @@
+---input---
+int func1(int x, int y)
+ /*@requires y >= 0*/
+{
+ return x / y;
+}
+
+
+int func2(int x, int y) //@requires y >= 0;
+{
+ return x / y;
+}
+
+
+void func3()
+//#test{};
+{
+ fun(2,3)//test1;
+ ;
+}
+
+
+int func4(int x, int y)
+ /*@requires y >= 0;*/
+{
+ return x / y;
+}
+
+
+int func5(int x, int y)
+ /*@requires y >= 0
+ {
+ return x / y;
+ }
+ */
+ {
+ return 2;
+ }
+
+
+//@requires y >= 0;
+//@requires y >= 0
+/*
+calling(2,5)
+*/
+/*
+calling(2,5);
+*/
+int func6(int x, int y)
+ //@requires y >= 0
+ //@requires y >= 0;
+ /*
+ hello(2,3);
+ */
+ /*
+ hello(2,3)
+ */
+ {
+ // haha(2,3);
+ return x / y;
+ /*
+ callblabla(x, y);
+ */
+ }
+//@requires y >= 0;
+//@requires y >= 0
+/*
+calling(2,5)
+*/
+/*
+calling(2,5);
+*/
+
+
+int * //@# a pointer to int
+func7 /* @# why a comment here? */ (
+ int /* the index has to be an int */ a, // index into the array
+ int *b //the array @!
+)
+/*
+ The end of the func params @ (@ will result error if parsed incorrectly)
+*/
+{
+ // yet another comment
+ return b[a];
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'func1' Name.Function
+'(' Punctuation
+'int' Keyword.Type
+' ' Text.Whitespace
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'/*@requires y >= 0*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'x' Name
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'y' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'func2' Name.Function
+'(' Punctuation
+'int' Keyword.Type
+' ' Text.Whitespace
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'y' Name
+')' Punctuation
+' ' Text.Whitespace
+'//@requires y >= 0;\n' Comment.Single
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'x' Name
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'y' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'void' Keyword.Type
+' ' Text.Whitespace
+'func3' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'//#test{};\n' Comment.Single
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'fun' Name
+'(' Punctuation
+'2' Literal.Number.Integer
+',' Punctuation
+'3' Literal.Number.Integer
+')' Punctuation
+'//test1;\n' Comment.Single
+
+' ' Text.Whitespace
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'func4' Name.Function
+'(' Punctuation
+'int' Keyword.Type
+' ' Text.Whitespace
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'/*@requires y >= 0;*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'x' Name
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'y' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'func5' Name.Function
+'(' Punctuation
+'int' Keyword.Type
+' ' Text.Whitespace
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'/*@requires y >= 0\n {\n return x / y;\n }\n */' Comment.Multiline
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'//@requires y >= 0;\n' Comment.Single
+
+'//@requires y >= 0\n' Comment.Single
+
+'/*\ncalling(2,5)\n*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'/*\ncalling(2,5);\n*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'func6' Name.Function
+'(' Punctuation
+'int' Keyword.Type
+' ' Text.Whitespace
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'//@requires y >= 0\n' Comment.Single
+
+' ' Text.Whitespace
+'//@requires y >= 0;\n' Comment.Single
+
+' ' Text.Whitespace
+'/*\n hello(2,3);\n */' Comment.Multiline
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'/*\n hello(2,3)\n */' Comment.Multiline
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'// haha(2,3);\n' Comment.Single
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'x' Name
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'y' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'/*\n callblabla(x, y);\n */' Comment.Multiline
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'//@requires y >= 0;\n' Comment.Single
+
+'//@requires y >= 0\n' Comment.Single
+
+'/*\ncalling(2,5)\n*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'/*\ncalling(2,5);\n*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'*' Operator
+' ' Text.Whitespace
+'//@# a pointer to int\n' Comment.Single
+
+'func7' Name.Function
+' ' Text.Whitespace
+'/* @# why a comment here? */' Comment.Multiline
+' ' Text.Whitespace
+'(' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'/* the index has to be an int */' Comment.Multiline
+' ' Text.Whitespace
+'a' Name
+',' Punctuation
+' ' Text.Whitespace
+'// index into the array\n' Comment.Single
+
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'*' Operator
+'b' Name
+' ' Text.Whitespace
+'//the array @!\n' Comment.Single
+
+')' Punctuation
+'\n' Text.Whitespace
+
+'/*\n The end of the func params @ (@ will result error if parsed incorrectly)\n*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'// yet another comment\n' Comment.Single
+
+' ' Text.Whitespace
+'return' Keyword
+' ' Text.Whitespace
+'b' Name
+'[' Punctuation
+'a' Name
+']' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_label.txt b/tests/snippets/c/test_label.txt
new file mode 100644
index 0000000..6f4ee80
--- /dev/null
+++ b/tests/snippets/c/test_label.txt
@@ -0,0 +1,31 @@
+---input---
+int main()
+{
+foo:
+ goto foo;
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+'foo' Name.Label
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'goto' Keyword
+' ' Text.Whitespace
+'foo' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_label_followed_by_statement.txt b/tests/snippets/c/test_label_followed_by_statement.txt
new file mode 100644
index 0000000..6dc8468
--- /dev/null
+++ b/tests/snippets/c/test_label_followed_by_statement.txt
@@ -0,0 +1,35 @@
+---input---
+int main()
+{
+foo:return 0;
+ goto foo;
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+'foo' Name.Label
+':' Punctuation
+'return' Keyword
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'goto' Keyword
+' ' Text.Whitespace
+'foo' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_label_space_before_colon.txt b/tests/snippets/c/test_label_space_before_colon.txt
new file mode 100644
index 0000000..2c3d065
--- /dev/null
+++ b/tests/snippets/c/test_label_space_before_colon.txt
@@ -0,0 +1,32 @@
+---input---
+int main()
+{
+foo :
+ goto foo;
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+'foo' Name.Label
+' ' Text.Whitespace
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'goto' Keyword
+' ' Text.Whitespace
+'foo' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_numbers.txt b/tests/snippets/c/test_numbers.txt
new file mode 100644
index 0000000..75af6fd
--- /dev/null
+++ b/tests/snippets/c/test_numbers.txt
@@ -0,0 +1,20 @@
+---input---
+42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23
+
+---tokens---
+'42' Literal.Number.Integer
+' ' Text.Whitespace
+'23.42' Literal.Number.Float
+' ' Text.Whitespace
+'23.' Literal.Number.Float
+' ' Text.Whitespace
+'.42' Literal.Number.Float
+' ' Text.Whitespace
+'023' Literal.Number.Oct
+' ' Text.Whitespace
+'0xdeadbeef' Literal.Number.Hex
+' ' Text.Whitespace
+'23e+42' Literal.Number.Float
+' ' Text.Whitespace
+'42e-23' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_preproc_file.txt b/tests/snippets/c/test_preproc_file.txt
new file mode 100644
index 0000000..2b5449e
--- /dev/null
+++ b/tests/snippets/c/test_preproc_file.txt
@@ -0,0 +1,17 @@
+---input---
+#include <foo>
+# include <foo>
+
+---tokens---
+'#' Comment.Preproc
+'include' Comment.Preproc
+' ' Text.Whitespace
+'<foo>' Comment.PreprocFile
+'\n' Comment.Preproc
+
+'#' Comment.Preproc
+' ' Text.Whitespace
+'include' Comment.Preproc
+' ' Text.Whitespace
+'<foo>' Comment.PreprocFile
+'\n' Comment.Preproc
diff --git a/tests/snippets/c/test_preproc_file2.txt b/tests/snippets/c/test_preproc_file2.txt
new file mode 100644
index 0000000..5d6ef3b
--- /dev/null
+++ b/tests/snippets/c/test_preproc_file2.txt
@@ -0,0 +1,17 @@
+---input---
+#include "foo.h"
+# include "foo.h"
+
+---tokens---
+'#' Comment.Preproc
+'include' Comment.Preproc
+' ' Text.Whitespace
+'"foo.h"' Comment.PreprocFile
+'\n' Comment.Preproc
+
+'#' Comment.Preproc
+' ' Text.Whitespace
+'include' Comment.Preproc
+' ' Text.Whitespace
+'"foo.h"' Comment.PreprocFile
+'\n' Comment.Preproc
diff --git a/tests/snippets/c/test_preproc_file3.txt b/tests/snippets/c/test_preproc_file3.txt
new file mode 100644
index 0000000..b36db4c
--- /dev/null
+++ b/tests/snippets/c/test_preproc_file3.txt
@@ -0,0 +1,18 @@
+Space around line break before macro is valid C, but failed to parse previously.
+
+---input---
+foo();
+ #define FOO 0
+
+---tokens---
+'foo' Name
+'(' Punctuation
+')' Punctuation
+';' Punctuation
+' ' Text.Whitespace
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'#' Comment.Preproc
+'define FOO 0' Comment.Preproc
+'\n' Comment.Preproc
diff --git a/tests/snippets/c/test_preproc_file4.txt b/tests/snippets/c/test_preproc_file4.txt
new file mode 100644
index 0000000..db1592d
--- /dev/null
+++ b/tests/snippets/c/test_preproc_file4.txt
@@ -0,0 +1,13 @@
+Comments can precede preprocessor macros
+
+---input---
+/* Comment */ /* Another */ #define FOO 0
+
+---tokens---
+'/* Comment */' Comment.Multiline
+' ' Text.Whitespace
+'/* Another */' Comment.Multiline
+' ' Text.Whitespace
+'#' Comment.Preproc
+'define FOO 0' Comment.Preproc
+'\n' Comment.Preproc
diff --git a/tests/snippets/c/test_preproc_file5.txt b/tests/snippets/c/test_preproc_file5.txt
new file mode 100644
index 0000000..f4a727b
--- /dev/null
+++ b/tests/snippets/c/test_preproc_file5.txt
@@ -0,0 +1,19 @@
+Preprocessor macros should appear only at the beginning of the line.
+Otherwise we should produce an error token.
+
+---input---
+foo(); #define FOO 0
+
+---tokens---
+'foo' Name
+'(' Punctuation
+')' Punctuation
+';' Punctuation
+' ' Text.Whitespace
+'#' Error
+'define' Name
+' ' Text.Whitespace
+'FOO' Name
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_string_resembling_decl_end.txt b/tests/snippets/c/test_string_resembling_decl_end.txt
new file mode 100644
index 0000000..17ce223
--- /dev/null
+++ b/tests/snippets/c/test_string_resembling_decl_end.txt
@@ -0,0 +1,41 @@
+---input---
+// This should not be recognized as a function declaration followed by
+// garbage.
+string xyz(");");
+
+// This should not be recognized as a function definition.
+
+string xyz("){ }");
+
+---tokens---
+'// This should not be recognized as a function declaration followed by\n' Comment.Single
+
+'// garbage.\n' Comment.Single
+
+'string' Name
+' ' Text.Whitespace
+'xyz' Name
+'(' Punctuation
+'"' Literal.String
+');' Literal.String
+'"' Literal.String
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'// This should not be recognized as a function definition.\n' Comment.Single
+
+'\n' Text.Whitespace
+
+'string' Name
+' ' Text.Whitespace
+'xyz' Name
+'(' Punctuation
+'"' Literal.String
+'){ }' Literal.String
+'"' Literal.String
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_switch.txt b/tests/snippets/c/test_switch.txt
new file mode 100644
index 0000000..d1c1148
--- /dev/null
+++ b/tests/snippets/c/test_switch.txt
@@ -0,0 +1,56 @@
+---input---
+int main()
+{
+ switch (0)
+ {
+ case 0:
+ default:
+ ;
+ }
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'switch' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'0' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'case' Keyword
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'default' Keyword
+':' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/c/test_switch_space_before_colon.txt b/tests/snippets/c/test_switch_space_before_colon.txt
new file mode 100644
index 0000000..583f77c
--- /dev/null
+++ b/tests/snippets/c/test_switch_space_before_colon.txt
@@ -0,0 +1,58 @@
+---input---
+int main()
+{
+ switch (0)
+ {
+ case 0 :
+ default :
+ ;
+ }
+}
+
+---tokens---
+'int' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'switch' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'0' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'case' Keyword
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+' ' Text.Whitespace
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'default' Keyword
+' ' Text.Whitespace
+':' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/cfm/test_basic_comment.txt b/tests/snippets/cfm/test_basic_comment.txt
new file mode 100644
index 0000000..c07a72a
--- /dev/null
+++ b/tests/snippets/cfm/test_basic_comment.txt
@@ -0,0 +1,8 @@
+---input---
+<!--- cfcomment --->
+
+---tokens---
+'<!---' Comment.Multiline
+' cfcomment ' Comment.Multiline
+'--->' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/cfm/test_nested_comment.txt b/tests/snippets/cfm/test_nested_comment.txt
new file mode 100644
index 0000000..f8aaf4c
--- /dev/null
+++ b/tests/snippets/cfm/test_nested_comment.txt
@@ -0,0 +1,12 @@
+---input---
+<!--- nested <!--- cfcomment ---> --->
+
+---tokens---
+'<!---' Comment.Multiline
+' nested ' Comment.Multiline
+'<!---' Comment.Multiline
+' cfcomment ' Comment.Multiline
+'--->' Comment.Multiline
+' ' Comment.Multiline
+'--->' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/coffeescript/test_beware_infinite_loop.txt b/tests/snippets/coffeescript/test_beware_infinite_loop.txt
new file mode 100644
index 0000000..886b706
--- /dev/null
+++ b/tests/snippets/coffeescript/test_beware_infinite_loop.txt
@@ -0,0 +1,14 @@
+# This demonstrates the case that "This isn't really guarding" comment
+# refers to.
+
+---input---
+/a/x;
+
+---tokens---
+'' Text
+'/' Operator
+'a' Name.Other
+'/' Operator
+'x' Name.Other
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/coffeescript/test_mixed_slashes.txt b/tests/snippets/coffeescript/test_mixed_slashes.txt
new file mode 100644
index 0000000..8701fad
--- /dev/null
+++ b/tests/snippets/coffeescript/test_mixed_slashes.txt
@@ -0,0 +1,13 @@
+---input---
+a?/foo/:1/2;
+
+---tokens---
+'a' Name.Other
+'?' Operator
+'/foo/' Literal.String.Regex
+':' Operator
+'1' Literal.Number.Integer
+'/' Operator
+'2' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/conftest.py b/tests/snippets/conftest.py
new file mode 100644
index 0000000..02e6255
--- /dev/null
+++ b/tests/snippets/conftest.py
@@ -0,0 +1,32 @@
+"""
+ Generated lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Checks that lexers output the expected tokens for each sample
+ under lexers/*/test_*.txt.
+
+ After making a change, rather than updating the samples manually,
+ run `pytest --update-goldens tests/lexers`.
+
+ To add a new sample, create a new file matching this pattern.
+ The directory must match the alias of the lexer to be used.
+ Populate only the input, then just `--update-goldens`.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pathlib
+import pytest
+
+from tests.conftest import LexerInlineTestItem
+
+
+def pytest_collect_file(parent, path):
+ if path.ext == '.txt':
+ return LexerTestFile.from_parent(parent, path=pathlib.Path(path))
+
+
+class LexerTestFile(pytest.File):
+ def collect(self):
+ yield LexerInlineTestItem.from_parent(self, name='')
diff --git a/tests/snippets/console/fake_ps2_prompt.txt b/tests/snippets/console/fake_ps2_prompt.txt
new file mode 100644
index 0000000..683b36d
--- /dev/null
+++ b/tests/snippets/console/fake_ps2_prompt.txt
@@ -0,0 +1,14 @@
+# Test that missing backslash means it's no prompt.
+
+---input---
+$ echo "> not a prompt"
+> not a prompt
+
+---tokens---
+'$ ' Generic.Prompt
+'echo' Name.Builtin
+' ' Text.Whitespace
+'"> not a prompt"' Literal.String.Double
+'\n' Text.Whitespace
+
+'> not a prompt\n' Generic.Output
diff --git a/tests/snippets/console/prompt_in_output.txt b/tests/snippets/console/prompt_in_output.txt
new file mode 100644
index 0000000..993bc75
--- /dev/null
+++ b/tests/snippets/console/prompt_in_output.txt
@@ -0,0 +1,21 @@
+# Test that output that looks like a prompt is not detected as such.
+
+---input---
+$ cat \
+> test.txt
+line1
+> file content, not prompt!
+
+---tokens---
+'$ ' Generic.Prompt
+'cat' Text
+' ' Text.Whitespace
+'\\\n' Literal.String.Escape
+
+'> ' Generic.Prompt
+'test.txt' Text
+'\n' Text.Whitespace
+
+'line1\n' Generic.Output
+
+'> file content, not prompt!\n' Generic.Output
diff --git a/tests/snippets/console/ps2_prompt.txt b/tests/snippets/console/ps2_prompt.txt
new file mode 100644
index 0000000..175a473
--- /dev/null
+++ b/tests/snippets/console/ps2_prompt.txt
@@ -0,0 +1,15 @@
+---input---
+$ ls\
+> /does/not/exist
+ls: cannot access ...
+
+---tokens---
+'$ ' Generic.Prompt
+'ls' Text
+'\\\n' Literal.String.Escape
+
+'> ' Generic.Prompt
+'/does/not/exist' Text
+'\n' Text.Whitespace
+
+'ls: cannot access ...\n' Generic.Output
diff --git a/tests/snippets/console/test_comment_after_prompt.txt b/tests/snippets/console/test_comment_after_prompt.txt
new file mode 100644
index 0000000..f115715
--- /dev/null
+++ b/tests/snippets/console/test_comment_after_prompt.txt
@@ -0,0 +1,6 @@
+---input---
+$# comment
+
+---tokens---
+'$' Generic.Prompt
+'# comment\n' Comment.Single
diff --git a/tests/snippets/console/test_newline_in_echo_no_ps2.txt b/tests/snippets/console/test_newline_in_echo_no_ps2.txt
new file mode 100644
index 0000000..57a1190
--- /dev/null
+++ b/tests/snippets/console/test_newline_in_echo_no_ps2.txt
@@ -0,0 +1,16 @@
+---input---
+$ echo \
+ hi
+hi
+
+---tokens---
+'$ ' Generic.Prompt
+'echo' Name.Builtin
+' ' Text.Whitespace
+'\\\n' Literal.String.Escape
+
+' ' Text.Whitespace
+'hi' Text
+'\n' Text.Whitespace
+
+'hi\n' Generic.Output
diff --git a/tests/snippets/console/test_newline_in_echo_ps2.txt b/tests/snippets/console/test_newline_in_echo_ps2.txt
new file mode 100644
index 0000000..b90eead
--- /dev/null
+++ b/tests/snippets/console/test_newline_in_echo_ps2.txt
@@ -0,0 +1,16 @@
+---input---
+$ echo \
+> hi
+hi
+
+---tokens---
+'$ ' Generic.Prompt
+'echo' Name.Builtin
+' ' Text.Whitespace
+'\\\n' Literal.String.Escape
+
+'> ' Generic.Prompt
+'hi' Text
+'\n' Text.Whitespace
+
+'hi\n' Generic.Output
diff --git a/tests/snippets/console/test_newline_in_ls_no_ps2.txt b/tests/snippets/console/test_newline_in_ls_no_ps2.txt
new file mode 100644
index 0000000..3366bc0
--- /dev/null
+++ b/tests/snippets/console/test_newline_in_ls_no_ps2.txt
@@ -0,0 +1,16 @@
+---input---
+$ ls \
+ hi
+hi
+
+---tokens---
+'$ ' Generic.Prompt
+'ls' Text
+' ' Text.Whitespace
+'\\\n' Literal.String.Escape
+
+' ' Text.Whitespace
+'hi' Text
+'\n' Text.Whitespace
+
+'hi\n' Generic.Output
diff --git a/tests/snippets/console/test_newline_in_ls_ps2.txt b/tests/snippets/console/test_newline_in_ls_ps2.txt
new file mode 100644
index 0000000..bf1bae5
--- /dev/null
+++ b/tests/snippets/console/test_newline_in_ls_ps2.txt
@@ -0,0 +1,16 @@
+---input---
+$ ls \
+> hi
+hi
+
+---tokens---
+'$ ' Generic.Prompt
+'ls' Text
+' ' Text.Whitespace
+'\\\n' Literal.String.Escape
+
+'> ' Generic.Prompt
+'hi' Text
+'\n' Text.Whitespace
+
+'hi\n' Generic.Output
diff --git a/tests/snippets/console/test_virtualenv.txt b/tests/snippets/console/test_virtualenv.txt
new file mode 100644
index 0000000..420b07a
--- /dev/null
+++ b/tests/snippets/console/test_virtualenv.txt
@@ -0,0 +1,11 @@
+---input---
+(env) [~/project]$ foo -h
+
+---tokens---
+'(env)' Generic.Prompt.VirtualEnv
+' ' Text
+'[~/project]$ ' Generic.Prompt
+'foo' Text
+' ' Text.Whitespace
+'-h' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/coq/test_unicode.txt b/tests/snippets/coq/test_unicode.txt
new file mode 100644
index 0000000..2007f7d
--- /dev/null
+++ b/tests/snippets/coq/test_unicode.txt
@@ -0,0 +1,15 @@
+---input---
+Check (α ≻ β).
+
+---tokens---
+'Check' Keyword.Namespace
+' ' Text
+'(' Operator
+'α' Name
+' ' Text
+'≻' Name.Builtin.Pseudo
+' ' Text
+'β' Name
+')' Operator
+'.' Operator
+'\n' Text
diff --git a/tests/snippets/cpp/test_good_comment.txt b/tests/snippets/cpp/test_good_comment.txt
new file mode 100644
index 0000000..115516d
--- /dev/null
+++ b/tests/snippets/cpp/test_good_comment.txt
@@ -0,0 +1,6 @@
+---input---
+/* foo */
+
+---tokens---
+'/* foo */' Comment.Multiline
+'\n' Text.Whitespace
diff --git a/tests/snippets/cpp/test_open_comment.txt b/tests/snippets/cpp/test_open_comment.txt
new file mode 100644
index 0000000..3799214
--- /dev/null
+++ b/tests/snippets/cpp/test_open_comment.txt
@@ -0,0 +1,5 @@
+---input---
+/* foo
+
+---tokens---
+'/* foo\n' Comment.Multiline
diff --git a/tests/snippets/cpp/test_unicode_identifiers.txt b/tests/snippets/cpp/test_unicode_identifiers.txt
new file mode 100644
index 0000000..c2f3e03
--- /dev/null
+++ b/tests/snippets/cpp/test_unicode_identifiers.txt
@@ -0,0 +1,146 @@
+---input---
+namespace 𝐨𝐩𝐭𝐢𝐨𝐧 {
+ int _hello();
+}
+
+int cześć = 2;
+
+class 𝐨𝐩𝐭𝐢𝐨𝐧𝐬 final {
+ 𝐨𝐩𝐭𝐢𝐨𝐧𝐬() {
+ 爴:
+ int a = 𝐨𝐩𝐭𝐢𝐨𝐧::hello();
+ };
+
+ static 𝐨𝐩𝐭𝐢𝐨𝐧𝐬 𝔡𝔢𝔣𝔞𝔲𝔩𝔱;
+ static 𝐨𝐩𝐭𝐢𝐨𝐧𝐬 𝔢𝔵𝔠𝔢𝔭𝔱𝔦𝔬𝔫𝔰;
+};
+
+enum class ⅭⅤ { red, green = 15, blue };
+
+---tokens---
+'namespace' Keyword
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧' Name.Namespace
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'_hello' Name.Function
+'(' Punctuation
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'int' Keyword.Type
+' ' Text.Whitespace
+'cześć' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'class' Keyword
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧𝐬' Name.Class
+' ' Text.Whitespace
+'final' Keyword
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧𝐬' Name
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'爴' Name.Label
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧' Name
+':' Operator
+':' Operator
+'hello' Name
+'(' Punctuation
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'static' Keyword
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧𝐬' Name
+' ' Text.Whitespace
+'𝔡𝔢𝔣𝔞𝔲𝔩𝔱' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'static' Keyword
+' ' Text.Whitespace
+'𝐨𝐩𝐭𝐢𝐨𝐧𝐬' Name
+' ' Text.Whitespace
+'𝔢𝔵𝔠𝔢𝔭𝔱𝔦𝔬𝔫𝔰' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'enum' Keyword
+' ' Text.Whitespace
+'class' Keyword
+' ' Text.Whitespace
+'ⅭⅤ' Name.Class
+' ' Text.Whitespace
+'{' Punctuation
+' ' Text.Whitespace
+'red' Name
+',' Punctuation
+' ' Text.Whitespace
+'green' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'15' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'blue' Name
+' ' Text.Whitespace
+'}' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_annotation.txt b/tests/snippets/crystal/test_annotation.txt
new file mode 100644
index 0000000..b585fa1
--- /dev/null
+++ b/tests/snippets/crystal/test_annotation.txt
@@ -0,0 +1,16 @@
+---input---
+@[FOO::Bar::Baz(opt: "xx")]
+
+---tokens---
+'@[' Operator
+'FOO::Bar::Baz' Name.Decorator
+'(' Punctuation
+'opt' Literal.String.Symbol
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'xx' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+']' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_array_access.txt b/tests/snippets/crystal/test_array_access.txt
new file mode 100644
index 0000000..6b36c51
--- /dev/null
+++ b/tests/snippets/crystal/test_array_access.txt
@@ -0,0 +1,11 @@
+---input---
+[5][5]?
+
+---tokens---
+'[' Operator
+'5' Literal.Number.Integer
+']' Operator
+'[' Operator
+'5' Literal.Number.Integer
+']?' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_chars.txt b/tests/snippets/crystal/test_chars.txt
new file mode 100644
index 0000000..c254bfb
--- /dev/null
+++ b/tests/snippets/crystal/test_chars.txt
@@ -0,0 +1,25 @@
+---input---
+'a'
+'я'
+'\u{1234}'
+'
+'
+'abc'
+
+---tokens---
+"'a'" Literal.String.Char
+'\n' Text.Whitespace
+
+"'я'" Literal.String.Char
+'\n' Text.Whitespace
+
+"'\\u{1234}'" Literal.String.Char
+'\n' Text.Whitespace
+
+"'\n'" Literal.String.Char
+'\n' Text.Whitespace
+
+"'" Error
+'abc' Name
+"'" Error
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_constant_and_module.txt b/tests/snippets/crystal/test_constant_and_module.txt
new file mode 100644
index 0000000..f8d33ff
--- /dev/null
+++ b/tests/snippets/crystal/test_constant_and_module.txt
@@ -0,0 +1,14 @@
+---input---
+HTTP
+HTTP::Server.new
+
+---tokens---
+'HTTP' Name.Constant
+'\n' Text.Whitespace
+
+'HTTP' Name
+'::' Operator
+'Server' Name
+'.' Operator
+'new' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_escaped_bracestring.txt b/tests/snippets/crystal/test_escaped_bracestring.txt
new file mode 100644
index 0000000..14718b9
--- /dev/null
+++ b/tests/snippets/crystal/test_escaped_bracestring.txt
@@ -0,0 +1,19 @@
+---input---
+str.gsub(%r{\\\\}, "/")
+
+---tokens---
+'str' Name
+'.' Operator
+'gsub' Name
+'(' Punctuation
+'%r{' Literal.String.Regex
+'\\\\' Literal.String.Regex
+'\\\\' Literal.String.Regex
+'}' Literal.String.Regex
+',' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'/' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_escaped_interpolation.txt b/tests/snippets/crystal/test_escaped_interpolation.txt
new file mode 100644
index 0000000..c623464
--- /dev/null
+++ b/tests/snippets/crystal/test_escaped_interpolation.txt
@@ -0,0 +1,9 @@
+---input---
+"\#{a + b}"
+
+---tokens---
+'"' Literal.String.Double
+'\\#' Literal.String.Escape
+'{a + b}' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_interpolation_nested_curly.txt b/tests/snippets/crystal/test_interpolation_nested_curly.txt
new file mode 100644
index 0000000..f4a69f7
--- /dev/null
+++ b/tests/snippets/crystal/test_interpolation_nested_curly.txt
@@ -0,0 +1,56 @@
+---input---
+"A#{ (3..5).group_by { |x| x/2}.map do |k,v| "#{k}" end.join }" + "Z"
+
+---tokens---
+'"' Literal.String.Double
+'A' Literal.String.Double
+'#{' Literal.String.Interpol
+' ' Text.Whitespace
+'(' Punctuation
+'3' Literal.Number.Integer
+'..' Operator
+'5' Literal.Number.Integer
+')' Punctuation
+'.' Operator
+'group_by' Name
+' ' Text.Whitespace
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'|' Operator
+'x' Name
+'|' Operator
+' ' Text.Whitespace
+'x' Name
+'/' Operator
+'2' Literal.Number.Integer
+'}' Literal.String.Interpol
+'.' Operator
+'map' Name
+' ' Text.Whitespace
+'do' Keyword
+' ' Text.Whitespace
+'|' Operator
+'k' Name
+',' Punctuation
+'v' Name
+'|' Operator
+' ' Text.Whitespace
+'"' Literal.String.Double
+'#{' Literal.String.Interpol
+'k' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text.Whitespace
+'end' Keyword
+'.' Operator
+'join' Name
+' ' Text.Whitespace
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'"' Literal.String.Double
+'Z' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_lib.txt b/tests/snippets/crystal/test_lib.txt
new file mode 100644
index 0000000..6f6f107
--- /dev/null
+++ b/tests/snippets/crystal/test_lib.txt
@@ -0,0 +1,58 @@
+---input---
+@[Link("some")]
+lib LibSome
+@[CallConvention("X86_StdCall")]
+fun foo="some.foo"(thing : Void*) : LibC::Int
+end
+
+---tokens---
+'@[' Operator
+'Link' Name.Decorator
+'(' Punctuation
+'"' Literal.String.Double
+'some' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+']' Operator
+'\n' Text.Whitespace
+
+'lib' Keyword
+' ' Text.Whitespace
+'LibSome' Name.Namespace
+'\n' Text.Whitespace
+
+'@[' Operator
+'CallConvention' Name.Decorator
+'(' Punctuation
+'"' Literal.String.Double
+'X86_StdCall' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+']' Operator
+'\n' Text.Whitespace
+
+'fun' Keyword
+' ' Text.Whitespace
+'foo' Name.Function
+'=' Operator
+'"' Literal.String.Double
+'some.foo' Literal.String.Double
+'"' Literal.String.Double
+'(' Punctuation
+'thing' Name
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'Void' Name
+'*' Operator
+')' Punctuation
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'LibC' Name
+'::' Operator
+'Int' Name
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_macro.txt b/tests/snippets/crystal/test_macro.txt
new file mode 100644
index 0000000..765caa2
--- /dev/null
+++ b/tests/snippets/crystal/test_macro.txt
@@ -0,0 +1,76 @@
+---input---
+def<=>(other : self) : Int
+{%for field in %w(first_name middle_name last_name)%}
+cmp={{field.id}}<=>other.{{field.id}}
+return cmp if cmp!=0
+{%end%}
+0
+end
+
+---tokens---
+'def' Keyword
+'<=>' Name.Function
+'(' Punctuation
+'other' Name
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'self' Keyword
+')' Punctuation
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'Int' Name
+'\n' Text.Whitespace
+
+'{%' Literal.String.Interpol
+'for' Keyword
+' ' Text.Whitespace
+'field' Name
+' ' Text.Whitespace
+'in' Keyword
+' ' Text.Whitespace
+'%w(' Literal.String.Other
+'first_name middle_name last_name' Literal.String.Other
+')' Literal.String.Other
+'%}' Literal.String.Interpol
+'\n' Text.Whitespace
+
+'cmp' Name
+'=' Operator
+'{{' Literal.String.Interpol
+'field' Name
+'.' Operator
+'id' Name
+'}}' Literal.String.Interpol
+'<=>' Operator
+'other' Name
+'.' Operator
+'{{' Literal.String.Interpol
+'field' Name
+'.' Operator
+'id' Name
+'}}' Literal.String.Interpol
+'\n' Text.Whitespace
+
+'return' Keyword
+' ' Text.Whitespace
+'cmp' Name
+' ' Text.Whitespace
+'if' Keyword
+' ' Text.Whitespace
+'cmp' Name
+'!=' Operator
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'{%' Literal.String.Interpol
+'end' Keyword
+'%}' Literal.String.Interpol
+'\n' Text.Whitespace
+
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_operator_methods.txt b/tests/snippets/crystal/test_operator_methods.txt
new file mode 100644
index 0000000..084771f
--- /dev/null
+++ b/tests/snippets/crystal/test_operator_methods.txt
@@ -0,0 +1,18 @@
+---input---
+([] of Int32).[]?(5)
+
+---tokens---
+'(' Punctuation
+'[' Operator
+']' Operator
+' ' Text.Whitespace
+'of' Keyword
+' ' Text.Whitespace
+'Int32' Name
+')' Punctuation
+'.' Operator
+'[]?' Name.Operator
+'(' Punctuation
+'5' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_percent_strings.txt b/tests/snippets/crystal/test_percent_strings.txt
new file mode 100644
index 0000000..80f247c
--- /dev/null
+++ b/tests/snippets/crystal/test_percent_strings.txt
@@ -0,0 +1,41 @@
+---input---
+%(hello ("world"))
+%[hello ["world"]]
+%{hello "world"}
+%<hello <"world">>
+%|hello "world"|
+
+---tokens---
+'%(' Literal.String.Other
+'hello ' Literal.String.Other
+'(' Literal.String.Other
+'"world"' Literal.String.Other
+')' Literal.String.Other
+')' Literal.String.Other
+'\n' Text.Whitespace
+
+'%[' Literal.String.Other
+'hello ' Literal.String.Other
+'[' Literal.String.Other
+'"world"' Literal.String.Other
+']' Literal.String.Other
+']' Literal.String.Other
+'\n' Text.Whitespace
+
+'%{' Literal.String.Other
+'hello "world"' Literal.String.Other
+'}' Literal.String.Other
+'\n' Text.Whitespace
+
+'%<' Literal.String.Other
+'hello ' Literal.String.Other
+'<' Literal.String.Other
+'"world"' Literal.String.Other
+'>' Literal.String.Other
+'>' Literal.String.Other
+'\n' Text.Whitespace
+
+'%|' Literal.String.Other
+'hello "world"' Literal.String.Other
+'|' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_percent_strings_special.txt b/tests/snippets/crystal/test_percent_strings_special.txt
new file mode 100644
index 0000000..4ca1c0b
--- /dev/null
+++ b/tests/snippets/crystal/test_percent_strings_special.txt
@@ -0,0 +1,31 @@
+---input---
+%Q(hello \n #{name})
+%q(hello \n #{name})
+%w(foo\nbar baz)
+
+---tokens---
+'%Q(' Literal.String.Other
+'hello ' Literal.String.Other
+'\\n' Literal.String.Escape
+' ' Literal.String.Other
+'#{' Literal.String.Interpol
+'name' Name
+'}' Literal.String.Interpol
+')' Literal.String.Other
+'\n' Text.Whitespace
+
+'%q(' Literal.String.Other
+'hello ' Literal.String.Other
+'\\' Literal.String.Other
+'n ' Literal.String.Other
+'#' Literal.String.Other
+'{name}' Literal.String.Other
+')' Literal.String.Other
+'\n' Text.Whitespace
+
+'%w(' Literal.String.Other
+'foo' Literal.String.Other
+'\\' Literal.String.Other
+'nbar baz' Literal.String.Other
+')' Literal.String.Other
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_pseudo_builtins.txt b/tests/snippets/crystal/test_pseudo_builtins.txt
new file mode 100644
index 0000000..9c8c917
--- /dev/null
+++ b/tests/snippets/crystal/test_pseudo_builtins.txt
@@ -0,0 +1,20 @@
+---input---
+record Cls do
+def_equals s
+end
+
+---tokens---
+'record' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'Cls' Name
+' ' Text.Whitespace
+'do' Keyword
+'\n' Text.Whitespace
+
+'def_equals' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'s' Name
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_pseudo_keywords.txt b/tests/snippets/crystal/test_pseudo_keywords.txt
new file mode 100644
index 0000000..18827ad
--- /dev/null
+++ b/tests/snippets/crystal/test_pseudo_keywords.txt
@@ -0,0 +1,50 @@
+---input---
+def f(x : T, line = __LINE__) forall T
+if x.is_a?(String)
+pp! x
+end
+end
+
+---tokens---
+'def' Keyword
+' ' Text.Whitespace
+'f' Name.Function
+'(' Punctuation
+'x' Name
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'T' Name
+',' Punctuation
+' ' Text.Whitespace
+'line' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'__LINE__' Keyword.Pseudo
+')' Punctuation
+' ' Text.Whitespace
+'forall' Keyword.Pseudo
+' ' Text.Whitespace
+'T' Name
+'\n' Text.Whitespace
+
+'if' Keyword
+' ' Text.Whitespace
+'x' Name
+'.is_a?' Keyword.Pseudo
+'(' Punctuation
+'String' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'pp!' Name.Builtin.Pseudo
+' ' Text.Whitespace
+'x' Name
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_range_syntax1.txt b/tests/snippets/crystal/test_range_syntax1.txt
new file mode 100644
index 0000000..a3ba24a
--- /dev/null
+++ b/tests/snippets/crystal/test_range_syntax1.txt
@@ -0,0 +1,8 @@
+---input---
+1...3
+
+---tokens---
+'1' Literal.Number.Integer
+'...' Operator
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/crystal/test_range_syntax2.txt b/tests/snippets/crystal/test_range_syntax2.txt
new file mode 100644
index 0000000..08bf4b1
--- /dev/null
+++ b/tests/snippets/crystal/test_range_syntax2.txt
@@ -0,0 +1,10 @@
+---input---
+1 .. 3
+
+---tokens---
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+'..' Operator
+' ' Text.Whitespace
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_braced_strings.txt b/tests/snippets/csound/test_braced_strings.txt
new file mode 100644
index 0000000..6921af9
--- /dev/null
+++ b/tests/snippets/csound/test_braced_strings.txt
@@ -0,0 +1,11 @@
+---input---
+{{
+characters$MACRO.
+}}
+
+---tokens---
+'{{' Literal.String
+'\ncharacters$MACRO.\n' Literal.String
+
+'}}' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_comments.txt b/tests/snippets/csound/test_comments.txt
new file mode 100644
index 0000000..e660c2b
--- /dev/null
+++ b/tests/snippets/csound/test_comments.txt
@@ -0,0 +1,16 @@
+---input---
+/*
+ * comment
+ */
+; comment
+// comment
+
+---tokens---
+'/*\n * comment\n */' Comment.Multiline
+'\n' Text.Whitespace
+
+'; comment' Comment.Single
+'\n' Text.Whitespace
+
+'// comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_escape_sequences.txt b/tests/snippets/csound/test_escape_sequences.txt
new file mode 100644
index 0000000..8e7ba10
--- /dev/null
+++ b/tests/snippets/csound/test_escape_sequences.txt
@@ -0,0 +1,122 @@
+---input---
+"\\"
+{{\\}}
+"\a"
+{{\a}}
+"\b"
+{{\b}}
+"\n"
+{{\n}}
+"\r"
+{{\r}}
+"\t"
+{{\t}}
+"\""
+{{\"}}
+"\012"
+{{\012}}
+"\345"
+{{\345}}
+"\67"
+{{\67}}
+
+---tokens---
+'"' Literal.String
+'\\\\' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\\\' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\a' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\a' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\b' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\b' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\n' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\n' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\r' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\r' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\t' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\t' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\"' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\"' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\012' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\012' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\345' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\345' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'\\67' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'{{' Literal.String
+'\\67' Literal.String.Escape
+'}}' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_function_like_macro_definitions.txt b/tests/snippets/csound/test_function_like_macro_definitions.txt
new file mode 100644
index 0000000..555f4ad
--- /dev/null
+++ b/tests/snippets/csound/test_function_like_macro_definitions.txt
@@ -0,0 +1,44 @@
+---input---
+#define MACRO(ARG1#ARG2) #macro_body#
+#define/**/
+MACRO(ARG1'ARG2' ARG3)/**/
+#\#macro
+body\##
+
+---tokens---
+'#define' Comment.Preproc
+' ' Text.Whitespace
+'MACRO' Comment.Preproc
+'(' Punctuation
+'ARG1' Comment.Preproc
+'#' Punctuation
+'ARG2' Comment.Preproc
+')' Punctuation
+' ' Text.Whitespace
+'#' Punctuation
+'macro_body' Comment.Preproc
+'#' Punctuation
+'\n' Text.Whitespace
+
+'#define' Comment.Preproc
+'/**/' Comment.Multiline
+'\n' Text.Whitespace
+
+'MACRO' Comment.Preproc
+'(' Punctuation
+'ARG1' Comment.Preproc
+"'" Punctuation
+'ARG2' Comment.Preproc
+"'" Punctuation
+' ' Text.Whitespace
+'ARG3' Comment.Preproc
+')' Punctuation
+'/**/' Comment.Multiline
+'\n' Text.Whitespace
+
+'#' Punctuation
+'\\#' Comment.Preproc
+'macro\nbody' Comment.Preproc
+'\\#' Comment.Preproc
+'#' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_function_like_macros.txt b/tests/snippets/csound/test_function_like_macros.txt
new file mode 100644
index 0000000..955cc18
--- /dev/null
+++ b/tests/snippets/csound/test_function_like_macros.txt
@@ -0,0 +1,40 @@
+---input---
+$MACRO.(((x#y\)))' "(#'x)\)x\))"# {{x\))x)\)(#'}});
+
+---tokens---
+'$MACRO.' Comment.Preproc
+'(' Punctuation
+'(' Comment.Preproc
+'(' Comment.Preproc
+'x#y\\)' Comment.Preproc
+')' Comment.Preproc
+')' Comment.Preproc
+"'" Punctuation
+' ' Comment.Preproc
+'"' Literal.String
+'(' Error
+'#' Error
+"'" Error
+'x' Literal.String
+')' Error
+'\\)' Comment.Preproc
+'x' Literal.String
+'\\)' Comment.Preproc
+')' Error
+'"' Literal.String
+'#' Punctuation
+' ' Comment.Preproc
+'{{' Literal.String
+'x' Literal.String
+'\\)' Comment.Preproc
+')' Error
+'x' Literal.String
+')' Error
+'\\)' Comment.Preproc
+'(' Error
+'#' Error
+"'" Error
+'}}' Literal.String
+')' Punctuation
+';' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_global_value_identifiers.txt b/tests/snippets/csound/test_global_value_identifiers.txt
new file mode 100644
index 0000000..4604200
--- /dev/null
+++ b/tests/snippets/csound/test_global_value_identifiers.txt
@@ -0,0 +1,30 @@
+---input---
+0dbfs
+A4
+kr
+ksmps
+nchnls
+nchnls_i
+sr
+
+---tokens---
+'0dbfs' Name.Variable.Global
+'\n' Text.Whitespace
+
+'A4' Name.Variable.Global
+'\n' Text.Whitespace
+
+'kr' Name.Variable.Global
+'\n' Text.Whitespace
+
+'ksmps' Name.Variable.Global
+'\n' Text.Whitespace
+
+'nchnls' Name.Variable.Global
+'\n' Text.Whitespace
+
+'nchnls_i' Name.Variable.Global
+'\n' Text.Whitespace
+
+'sr' Name.Variable.Global
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_goto_statements.txt b/tests/snippets/csound/test_goto_statements.txt
new file mode 100644
index 0000000..95a76ab
--- /dev/null
+++ b/tests/snippets/csound/test_goto_statements.txt
@@ -0,0 +1,176 @@
+---input---
+goto aLabel
+igoto aLabel
+kgoto aLabel
+reinit aLabel
+rigoto aLabel
+tigoto aLabel
+cggoto 1==0, aLabel
+cigoto 1==0, aLabel
+cingoto 1==0, aLabel
+ckgoto 1==0, aLabel
+cngoto 1==0, aLabel
+cnkgoto 1==0, aLabel
+timout 0, 0, aLabel
+loop_ge 0, 0, 0, aLabel
+loop_gt 0, 0, 0, aLabel
+loop_le 0, 0, 0, aLabel
+loop_lt 0, 0, 0, aLabel
+
+---tokens---
+'goto' Keyword
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'igoto' Keyword
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'kgoto' Keyword
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'reinit' Keyword.Pseudo
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'rigoto' Keyword.Pseudo
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'tigoto' Keyword.Pseudo
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'cggoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'cigoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'cingoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'ckgoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'cngoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'cnkgoto' Keyword.Pseudo
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'==' Operator
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'timout' Keyword.Pseudo
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'loop_ge' Keyword.Pseudo
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'loop_gt' Keyword.Pseudo
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'loop_le' Keyword.Pseudo
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
+
+'loop_lt' Keyword.Pseudo
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'aLabel' Name.Label
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_include_directives.txt b/tests/snippets/csound/test_include_directives.txt
new file mode 100644
index 0000000..b90f300
--- /dev/null
+++ b/tests/snippets/csound/test_include_directives.txt
@@ -0,0 +1,14 @@
+---input---
+#include/**/"file.udo"
+#include/**/|file.udo|
+
+---tokens---
+'#include' Comment.Preproc
+'/**/' Comment.Multiline
+'"file.udo"' Literal.String
+'\n' Text.Whitespace
+
+'#include' Comment.Preproc
+'/**/' Comment.Multiline
+'|file.udo|' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_includestr_directives.txt b/tests/snippets/csound/test_includestr_directives.txt
new file mode 100644
index 0000000..0511c5d
--- /dev/null
+++ b/tests/snippets/csound/test_includestr_directives.txt
@@ -0,0 +1,11 @@
+---input---
+#includestr/**/"$MACRO..udo"
+
+---tokens---
+'#includestr' Comment.Preproc
+'/**/' Comment.Multiline
+'"' Literal.String
+'$MACRO.' Comment.Preproc
+'.udo' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_instrument_blocks.txt b/tests/snippets/csound/test_instrument_blocks.txt
new file mode 100644
index 0000000..b6b0ba9
--- /dev/null
+++ b/tests/snippets/csound/test_instrument_blocks.txt
@@ -0,0 +1,42 @@
+---input---
+instr/**/1,/**/N_a_M_e_,/**/+Name/**///
+ iDuration = p3
+ outc:a(aSignal)
+endin
+
+---tokens---
+'instr' Keyword.Declaration
+'/**/' Comment.Multiline
+'1' Name.Function
+',' Punctuation
+'/**/' Comment.Multiline
+'N_a_M_e_' Name.Function
+',' Punctuation
+'/**/' Comment.Multiline
+'+' Punctuation
+'Name' Name.Function
+'/**/' Comment.Multiline
+'//' Comment.Single
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'i' Keyword.Type
+'Duration' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'p3' Name.Variable.Instance
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'outc' Name.Builtin
+':' Punctuation
+'a' Keyword.Type
+'(' Punctuation
+'a' Keyword.Type
+'Signal' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'endin' Keyword.Declaration
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_keywords.txt b/tests/snippets/csound/test_keywords.txt
new file mode 100644
index 0000000..eb67eca
--- /dev/null
+++ b/tests/snippets/csound/test_keywords.txt
@@ -0,0 +1,62 @@
+---input---
+do
+else
+elseif
+endif
+enduntil
+fi
+if
+ithen
+kthen
+od
+then
+until
+while
+return
+rireturn
+
+---tokens---
+'do' Keyword
+'\n' Text.Whitespace
+
+'else' Keyword
+'\n' Text.Whitespace
+
+'elseif' Keyword
+'\n' Text.Whitespace
+
+'endif' Keyword
+'\n' Text.Whitespace
+
+'enduntil' Keyword
+'\n' Text.Whitespace
+
+'fi' Keyword
+'\n' Text.Whitespace
+
+'if' Keyword
+'\n' Text.Whitespace
+
+'ithen' Keyword
+'\n' Text.Whitespace
+
+'kthen' Keyword
+'\n' Text.Whitespace
+
+'od' Keyword
+'\n' Text.Whitespace
+
+'then' Keyword
+'\n' Text.Whitespace
+
+'until' Keyword
+'\n' Text.Whitespace
+
+'while' Keyword
+'\n' Text.Whitespace
+
+'return' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'rireturn' Keyword.Pseudo
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_labels.txt b/tests/snippets/csound/test_labels.txt
new file mode 100644
index 0000000..afa6be9
--- /dev/null
+++ b/tests/snippets/csound/test_labels.txt
@@ -0,0 +1,13 @@
+---input---
+aLabel:
+ label2:
+
+---tokens---
+'aLabel' Name.Label
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'label2' Name.Label
+':' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_macro_preprocessor_directives.txt b/tests/snippets/csound/test_macro_preprocessor_directives.txt
new file mode 100644
index 0000000..0a576f6
--- /dev/null
+++ b/tests/snippets/csound/test_macro_preprocessor_directives.txt
@@ -0,0 +1,20 @@
+---input---
+#ifdef MACRO
+#ifndef MACRO
+#undef MACRO
+
+---tokens---
+'#ifdef' Comment.Preproc
+' ' Text.Whitespace
+'MACRO' Comment.Preproc
+'\n' Text.Whitespace
+
+'#ifndef' Comment.Preproc
+' ' Text.Whitespace
+'MACRO' Comment.Preproc
+'\n' Text.Whitespace
+
+'#undef' Comment.Preproc
+' ' Text.Whitespace
+'MACRO' Comment.Preproc
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_name.txt b/tests/snippets/csound/test_name.txt
new file mode 100644
index 0000000..7ae5d58
--- /dev/null
+++ b/tests/snippets/csound/test_name.txt
@@ -0,0 +1,9 @@
+---input---
+kG:V
+
+---tokens---
+'k' Keyword.Type
+'G' Name
+':' Punctuation
+'V' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_numbers.txt b/tests/snippets/csound/test_numbers.txt
new file mode 100644
index 0000000..63a8106
--- /dev/null
+++ b/tests/snippets/csound/test_numbers.txt
@@ -0,0 +1,52 @@
+---input---
+123 0123456789
+0xabcdef0123456789 0XABCDEF
+1e2
+3e+4
+5e-6
+7E8
+9E+0
+1E-2
+3.
+4.56
+.789
+
+---tokens---
+'123' Literal.Number.Integer
+' ' Text.Whitespace
+'0123456789' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'0x' Keyword.Type
+'abcdef0123456789' Literal.Number.Hex
+' ' Text.Whitespace
+'0X' Keyword.Type
+'ABCDEF' Literal.Number.Hex
+'\n' Text.Whitespace
+
+'1e2' Literal.Number.Float
+'\n' Text.Whitespace
+
+'3e+4' Literal.Number.Float
+'\n' Text.Whitespace
+
+'5e-6' Literal.Number.Float
+'\n' Text.Whitespace
+
+'7E8' Literal.Number.Float
+'\n' Text.Whitespace
+
+'9E+0' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1E-2' Literal.Number.Float
+'\n' Text.Whitespace
+
+'3.' Literal.Number.Float
+'\n' Text.Whitespace
+
+'4.56' Literal.Number.Float
+'\n' Text.Whitespace
+
+'.789' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_object_like_macro_definitions.txt b/tests/snippets/csound/test_object_like_macro_definitions.txt
new file mode 100644
index 0000000..dd165f3
--- /dev/null
+++ b/tests/snippets/csound/test_object_like_macro_definitions.txt
@@ -0,0 +1,30 @@
+---input---
+# define MACRO#macro_body#
+#define/**/
+MACRO/**/
+#\#macro
+body\##
+
+---tokens---
+'# \tdefine' Comment.Preproc
+' ' Text.Whitespace
+'MACRO' Comment.Preproc
+'#' Punctuation
+'macro_body' Comment.Preproc
+'#' Punctuation
+'\n' Text.Whitespace
+
+'#define' Comment.Preproc
+'/**/' Comment.Multiline
+'\n' Text.Whitespace
+
+'MACRO' Comment.Preproc
+'/**/' Comment.Multiline
+'\n' Text.Whitespace
+
+'#' Punctuation
+'\\#' Comment.Preproc
+'macro\nbody' Comment.Preproc
+'\\#' Comment.Preproc
+'#' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_operators.txt b/tests/snippets/csound/test_operators.txt
new file mode 100644
index 0000000..1154a44
--- /dev/null
+++ b/tests/snippets/csound/test_operators.txt
@@ -0,0 +1,114 @@
+---input---
++
+-
+~
+¬
+!
+*
+/
+^
+%
+<<
+>>
+<
+>
+<=
+>=
+==
+!=
+&
+#
+|
+&&
+||
+?
+:
++=
+-=
+*=
+/=
+
+---tokens---
+'+' Operator
+'\n' Text.Whitespace
+
+'-' Operator
+'\n' Text.Whitespace
+
+'~' Operator
+'\n' Text.Whitespace
+
+'¬' Operator
+'\n' Text.Whitespace
+
+'!' Operator
+'\n' Text.Whitespace
+
+'*' Operator
+'\n' Text.Whitespace
+
+'/' Operator
+'\n' Text.Whitespace
+
+'^' Operator
+'\n' Text.Whitespace
+
+'%' Operator
+'\n' Text.Whitespace
+
+'<<' Operator
+'\n' Text.Whitespace
+
+'>>' Operator
+'\n' Text.Whitespace
+
+'<' Operator
+'\n' Text.Whitespace
+
+'>' Operator
+'\n' Text.Whitespace
+
+'<=' Operator
+'\n' Text.Whitespace
+
+'>=' Operator
+'\n' Text.Whitespace
+
+'==' Operator
+'\n' Text.Whitespace
+
+'!=' Operator
+'\n' Text.Whitespace
+
+'&' Operator
+'\n' Text.Whitespace
+
+'#' Operator
+'\n' Text.Whitespace
+
+'|' Operator
+'\n' Text.Whitespace
+
+'&&' Operator
+'\n' Text.Whitespace
+
+'||' Operator
+'\n' Text.Whitespace
+
+'?' Operator
+'\n' Text.Whitespace
+
+':' Operator
+'\n' Text.Whitespace
+
+'+=' Operator
+'\n' Text.Whitespace
+
+'-=' Operator
+'\n' Text.Whitespace
+
+'*=' Operator
+'\n' Text.Whitespace
+
+'/=' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_other_preprocessor_directives.txt b/tests/snippets/csound/test_other_preprocessor_directives.txt
new file mode 100644
index 0000000..95026a0
--- /dev/null
+++ b/tests/snippets/csound/test_other_preprocessor_directives.txt
@@ -0,0 +1,26 @@
+---input---
+#else
+#end
+#endif
+###
+@ 12345
+@@ 67890
+
+---tokens---
+'#else' Comment.Preproc
+'\n' Text.Whitespace
+
+'#end' Comment.Preproc
+'\n' Text.Whitespace
+
+'#endif' Comment.Preproc
+'\n' Text.Whitespace
+
+'###' Comment.Preproc
+'\n' Text.Whitespace
+
+'@ \t12345' Comment.Preproc
+'\n' Text.Whitespace
+
+'@@ \t67890' Comment.Preproc
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_printks_and_prints_escape_sequences.txt b/tests/snippets/csound/test_printks_and_prints_escape_sequences.txt
new file mode 100644
index 0000000..3663dbb
--- /dev/null
+++ b/tests/snippets/csound/test_printks_and_prints_escape_sequences.txt
@@ -0,0 +1,290 @@
+---input---
+printks "%!"
+printks "%%"
+printks "%n"
+printks "%N"
+printks "%r"
+printks "%R"
+printks "%t"
+printks "%T"
+printks "\\a"
+printks "\\A"
+printks "\\b"
+printks "\\B"
+printks "\\n"
+printks "\\N"
+printks "\\r"
+printks "\\R"
+printks "\\t"
+printks "\\T"
+prints "%!"
+prints "%%"
+prints "%n"
+prints "%N"
+prints "%r"
+prints "%R"
+prints "%t"
+prints "%T"
+prints "\\a"
+prints "\\A"
+prints "\\b"
+prints "\\B"
+prints "\\n"
+prints "\\N"
+prints "\\r"
+prints "\\R"
+prints "\\t"
+prints "\\T"
+
+---tokens---
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%!' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%%' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%n' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%N' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%r' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%R' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%t' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%T' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\a' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\A' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\b' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\B' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\n' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\N' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\r' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\R' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\t' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'printks' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\T' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%!' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%%' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%n' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%N' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%r' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%R' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%t' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'%T' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\a' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\A' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\b' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\B' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\n' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\N' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\r' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\R' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\t' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'prints' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String
+'\\\\T' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_quoted_strings.txt b/tests/snippets/csound/test_quoted_strings.txt
new file mode 100644
index 0000000..d7e828a
--- /dev/null
+++ b/tests/snippets/csound/test_quoted_strings.txt
@@ -0,0 +1,9 @@
+---input---
+"characters$MACRO."
+
+---tokens---
+'"' Literal.String
+'characters' Literal.String
+'$MACRO.' Comment.Preproc
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/csound/test_user_defined_opcodes.txt b/tests/snippets/csound/test_user_defined_opcodes.txt
new file mode 100644
index 0000000..6320566
--- /dev/null
+++ b/tests/snippets/csound/test_user_defined_opcodes.txt
@@ -0,0 +1,24 @@
+---input---
+opcode/**/aUDO,/**/i[],/**/aik//
+ aUDO
+endop
+
+---tokens---
+'opcode' Keyword.Declaration
+'/**/' Comment.Multiline
+'aUDO' Name.Function
+',' Punctuation
+'/**/' Comment.Multiline
+'i[]' Keyword.Type
+',' Punctuation
+'/**/' Comment.Multiline
+'aik' Keyword.Type
+'//' Comment.Single
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'aUDO' Name.Function
+'\n' Text.Whitespace
+
+'endop' Keyword.Declaration
+'\n' Text.Whitespace
diff --git a/tests/snippets/doscon/test_gt_only.txt b/tests/snippets/doscon/test_gt_only.txt
new file mode 100644
index 0000000..b37b8fa
--- /dev/null
+++ b/tests/snippets/doscon/test_gt_only.txt
@@ -0,0 +1,11 @@
+---input---
+> py
+hi
+
+---tokens---
+'>' Generic.Prompt
+' ' Text
+'py' Text
+'\n' Text
+
+'hi\n' Generic.Output
diff --git a/tests/snippets/elpi/test_catastrophic_backtracking.txt b/tests/snippets/elpi/test_catastrophic_backtracking.txt
new file mode 100644
index 0000000..a14a054
--- /dev/null
+++ b/tests/snippets/elpi/test_catastrophic_backtracking.txt
@@ -0,0 +1,6 @@
+---input---
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+
+---tokens---
+'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/elpi/test_chr.txt b/tests/snippets/elpi/test_chr.txt
new file mode 100644
index 0000000..75291a3
--- /dev/null
+++ b/tests/snippets/elpi/test_chr.txt
@@ -0,0 +1,54 @@
+---input---
+constraint foo, bar {
+
+ :name "myrule"
+ rule (odd X) \ (even X) | true <=> fail.
+
+}
+rule. % not a kwd
+
+---tokens---
+'constraint' Keyword.Declaration
+' ' Text.Whitespace
+'foo, bar ' Name.Function
+'{' Text
+'\n\n ' Text.Whitespace
+':name' Keyword.Mode
+' ' Text.Whitespace
+'"' Literal.String.Double
+'myrule' Literal.String.Double
+'"' Literal.String.Double
+'\n ' Text.Whitespace
+'rule' Keyword.Declaration
+' ' Text.Whitespace
+'(' Text
+'odd' Text
+' ' Text.Whitespace
+'X' Name.Variable
+')' Operator
+' ' Text.Whitespace
+'\\' Keyword.Declaration
+' ' Text.Whitespace
+'(' Text
+'even' Text
+' ' Text.Whitespace
+'X' Name.Variable
+')' Operator
+' ' Text.Whitespace
+'|' Keyword.Declaration
+' ' Text.Whitespace
+'true' Text
+' ' Text.Whitespace
+'<=>' Keyword.Declaration
+' ' Text.Whitespace
+'fail' Text
+'.' Operator
+'\n\n' Text.Whitespace
+
+'}' Text
+'\n' Text.Whitespace
+
+'rule' Text
+'.' Operator
+' ' Text.Whitespace
+'% not a kwd\n' Comment
diff --git a/tests/snippets/elpi/test_clause.txt b/tests/snippets/elpi/test_clause.txt
new file mode 100644
index 0000000..e485753
--- /dev/null
+++ b/tests/snippets/elpi/test_clause.txt
@@ -0,0 +1,67 @@
+---input---
+true.
+stop :- !.
+of (fun F) :- pi x\ of x => of (F x).
+match (uvar as Y) :- print Y.
+
+---tokens---
+'true' Text
+'.' Operator
+'\n' Text.Whitespace
+
+'stop' Text
+' ' Text.Whitespace
+':-' Keyword.Declaration
+' ' Text.Whitespace
+'!' Keyword.Declaration
+'.' Operator
+'\n' Text.Whitespace
+
+'of' Text
+' ' Text.Whitespace
+'(' Text
+'fun' Text
+' ' Text.Whitespace
+'F' Name.Variable
+')' Operator
+' ' Text.Whitespace
+':-' Keyword.Declaration
+' ' Text.Whitespace
+'pi' Keyword.Declaration
+' ' Text.Whitespace
+'x' Name.Variable
+'\\' Text
+' ' Text.Whitespace
+'of' Text
+' ' Text.Whitespace
+'x' Text
+' ' Text.Whitespace
+'=>' Keyword.Declaration
+' ' Text.Whitespace
+'of' Text
+' ' Text.Whitespace
+'(' Text
+'F' Name.Variable
+' ' Text.Whitespace
+'x' Text
+')' Operator
+'.' Operator
+'\n' Text.Whitespace
+
+'match' Text
+' ' Text.Whitespace
+'(' Text
+'uvar' Keyword.Declaration
+' ' Text.Whitespace
+'as' Keyword.Declaration
+' ' Text.Whitespace
+'Y' Name.Variable
+')' Operator
+' ' Text.Whitespace
+':-' Keyword.Declaration
+' ' Text.Whitespace
+'print' Text
+' ' Text.Whitespace
+'Y' Name.Variable
+'.' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/elpi/test_namespace.txt b/tests/snippets/elpi/test_namespace.txt
new file mode 100644
index 0000000..98a35b0
--- /dev/null
+++ b/tests/snippets/elpi/test_namespace.txt
@@ -0,0 +1,35 @@
+---input---
+namespace foo.bar {
+ baz :- std.do! [].
+}
+shorten foo. { bar }.
+
+---tokens---
+'namespace' Keyword.Declaration
+' ' Text.Whitespace
+'foo.bar' Text
+' ' Text.Whitespace
+'{\n' Text
+
+' ' Text.Whitespace
+'baz' Text
+' ' Text.Whitespace
+':-' Keyword.Declaration
+' ' Text.Whitespace
+'std.do!' Text
+' ' Text.Whitespace
+'[]' Keyword.Declaration
+'.' Operator
+'\n' Text.Whitespace
+
+'}\n' Text
+
+'shorten' Keyword.Declaration
+' ' Text.Whitespace
+'foo.' Text
+' ' Text.Whitespace
+'{ ' Text
+'bar' Text
+' ' Text.Whitespace
+'}.' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/elpi/test_pred.txt b/tests/snippets/elpi/test_pred.txt
new file mode 100644
index 0000000..657c8fd
--- /dev/null
+++ b/tests/snippets/elpi/test_pred.txt
@@ -0,0 +1,60 @@
+---input---
+pred p1.
+pred p2 i:int, o:list A.
+pred p3 i:(bool -> prop).
+:index(_ 2) pred p4 i:int, i:A.
+
+---tokens---
+'pred' Keyword.Declaration
+' ' Text.Whitespace
+'p1' Name.Function
+'.' Text
+'\n' Text.Whitespace
+
+'pred' Keyword.Declaration
+' ' Text.Whitespace
+'p2' Name.Function
+' ' Text.Whitespace
+'i:' Keyword.Mode
+'int' Keyword.Type
+',' Text
+' ' Text.Whitespace
+'o:' Keyword.Mode
+'list' Keyword.Type
+' ' Text.Whitespace
+'A' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'pred' Keyword.Declaration
+' ' Text.Whitespace
+'p3' Name.Function
+' ' Text.Whitespace
+'i:' Keyword.Mode
+'(' Keyword.Type
+'bool' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'prop' Keyword.Type
+')' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+':index' Keyword.Mode
+'(' Text.Whitespace
+'_ 2' Literal.Number.Integer
+')' Text
+' ' Text.Whitespace
+'pred' Keyword.Declaration
+' ' Text.Whitespace
+'p4' Name.Function
+' ' Text.Whitespace
+'i:' Keyword.Mode
+'int' Keyword.Type
+',' Text
+' ' Text.Whitespace
+'i:' Keyword.Mode
+'A' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/elpi/test_type.txt b/tests/snippets/elpi/test_type.txt
new file mode 100644
index 0000000..8a506ce
--- /dev/null
+++ b/tests/snippets/elpi/test_type.txt
@@ -0,0 +1,112 @@
+---input---
+kind list type -> type.
+type nil list A.
+type cons A -> list A -> list A.
+kind tm type.
+type fun (tm -> tm) -> tm.
+type app tm -> tm -> tm.
+pred foo i:(tm -> tm), o:tm.
+
+---tokens---
+'kind' Keyword.Declaration
+' ' Text.Whitespace
+'list' Name.Function
+' ' Text.Whitespace
+'type' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'type' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'type' Keyword.Declaration
+' ' Text.Whitespace
+'nil' Name.Function
+' ' Text.Whitespace
+'list' Keyword.Type
+' ' Text.Whitespace
+'A' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'type' Keyword.Declaration
+' ' Text.Whitespace
+'cons' Name.Function
+' ' Text.Whitespace
+'A' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'list' Keyword.Type
+' ' Text.Whitespace
+'A' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'list' Keyword.Type
+' ' Text.Whitespace
+'A' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'kind' Keyword.Declaration
+' ' Text.Whitespace
+'tm' Name.Function
+' ' Text.Whitespace
+'type' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'type' Keyword.Declaration
+' ' Text.Whitespace
+'fun' Name.Function
+' ' Text.Whitespace
+'(' Keyword.Type
+'tm' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'tm' Keyword.Type
+')' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'tm' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'type' Keyword.Declaration
+' ' Text.Whitespace
+'app' Name.Function
+' ' Text.Whitespace
+'tm' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'tm' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'tm' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
+
+'pred' Keyword.Declaration
+' ' Text.Whitespace
+'foo' Name.Function
+' ' Text.Whitespace
+'i:' Keyword.Mode
+'(' Keyword.Type
+'tm' Keyword.Type
+' ' Text.Whitespace
+'->' Keyword.Type
+' ' Text.Whitespace
+'tm' Keyword.Type
+')' Keyword.Type
+',' Text
+' ' Text.Whitespace
+'o:' Keyword.Mode
+'tm' Keyword.Type
+'.' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/ezhil/test_function.txt b/tests/snippets/ezhil/test_function.txt
new file mode 100644
index 0000000..f89a357
--- /dev/null
+++ b/tests/snippets/ezhil/test_function.txt
@@ -0,0 +1,100 @@
+---input---
+# (C) முத்தையா அண்ணாமலை 2013, 2015
+நிரல்பாகம் gcd ( x, y )
+மு = max(x,y)
+ q = min(x,y)
+
+@( q == 0 ) ஆனால்
+ பின்கொடு மு
+முடி
+பின்கொடு gcd( மு - q , q )
+முடி
+
+---tokens---
+'# (C) முத்தையா அண்ணாமலை 2013, 2015' Comment.Single
+'\n' Text.Whitespace
+
+'நிரல்பாகம்' Keyword
+' ' Text.Whitespace
+'gcd' Name
+' ' Text.Whitespace
+'(' Punctuation
+' ' Text.Whitespace
+'x' Name
+',' Operator
+' ' Text.Whitespace
+'y' Name
+' ' Text.Whitespace
+')' Punctuation
+'\n' Text.Whitespace
+
+'மு' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'max' Name.Builtin
+'(' Punctuation
+'x' Name
+',' Operator
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'q' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'min' Name.Builtin
+'(' Punctuation
+'x' Name
+',' Operator
+'y' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'@' Operator
+'(' Punctuation
+' ' Text.Whitespace
+'q' Name
+' ' Text.Whitespace
+'==' Operator
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+' ' Text.Whitespace
+')' Punctuation
+' ' Text.Whitespace
+'ஆனால்' Keyword
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'பின்கொடு' Keyword
+' ' Text.Whitespace
+'மு' Name
+'\n' Text.Whitespace
+
+'முடி' Keyword
+'\n' Text.Whitespace
+
+'பின்கொடு' Keyword
+' ' Text.Whitespace
+'gcd' Name
+'(' Punctuation
+' ' Text.Whitespace
+'மு' Name
+' ' Text.Whitespace
+'-' Operator
+' ' Text.Whitespace
+'q' Name
+' ' Text.Whitespace
+',' Operator
+' ' Text.Whitespace
+'q' Name
+' ' Text.Whitespace
+')' Punctuation
+'\n' Text.Whitespace
+
+'முடி' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/ezhil/test_gcd_expr.txt b/tests/snippets/ezhil/test_gcd_expr.txt
new file mode 100644
index 0000000..f4aaed9
--- /dev/null
+++ b/tests/snippets/ezhil/test_gcd_expr.txt
@@ -0,0 +1,21 @@
+---input---
+1^3+(5-5)*gcd(a,b)
+
+---tokens---
+'1' Literal.Number.Integer
+'^' Operator
+'3' Literal.Number.Integer
+'+' Operator
+'(' Punctuation
+'5' Literal.Number.Integer
+'-' Operator
+'5' Literal.Number.Integer
+')' Punctuation
+'*' Operator
+'gcd' Name
+'(' Punctuation
+'a' Name
+',' Operator
+'b' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/ezhil/test_if_statement.txt b/tests/snippets/ezhil/test_if_statement.txt
new file mode 100644
index 0000000..2fe35b7
--- /dev/null
+++ b/tests/snippets/ezhil/test_if_statement.txt
@@ -0,0 +1,28 @@
+---input---
+@( 0 > 3 ) ஆனால்
+ பதிப்பி "wont print"
+முடி
+
+---tokens---
+'@' Operator
+'(' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+' ' Text.Whitespace
+'>' Operator
+' ' Text.Whitespace
+'3' Literal.Number.Integer
+' ' Text.Whitespace
+')' Punctuation
+' ' Text.Whitespace
+'ஆனால்' Keyword
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'பதிப்பி' Keyword
+' ' Text.Whitespace
+'"wont print"' Literal.String
+'\n' Text.Whitespace
+
+'முடி' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/ezhil/test_sum.txt b/tests/snippets/ezhil/test_sum.txt
new file mode 100644
index 0000000..fa2063d
--- /dev/null
+++ b/tests/snippets/ezhil/test_sum.txt
@@ -0,0 +1,8 @@
+---input---
+1+3
+
+---tokens---
+'1' Literal.Number.Integer
+'+' Operator
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/fortran/test_string_cataback.txt b/tests/snippets/fortran/test_string_cataback.txt
new file mode 100644
index 0000000..9f4b9f1
--- /dev/null
+++ b/tests/snippets/fortran/test_string_cataback.txt
@@ -0,0 +1,112 @@
+---input---
+! Bad string, there isn't an even number of backslashes.
+! This should not cause catastrophic backtracking.
+'\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+
+---tokens---
+"! Bad string, there isn't an even number of backslashes.\n" Comment
+
+'! This should not cause catastrophic backtracking.\n' Comment
+
+"'" Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+'\\' Error
+"'" Error
+'\n' Text.Whitespace
diff --git a/tests/snippets/gas/test_comments.txt b/tests/snippets/gas/test_comments.txt
new file mode 100644
index 0000000..b385d27
--- /dev/null
+++ b/tests/snippets/gas/test_comments.txt
@@ -0,0 +1,29 @@
+---input---
+lock addq $0, /* comments */ (%rsp) /*
+// comments
+*/ xorq %rax, %rax // comments
+
+---tokens---
+'lock' Name.Attribute
+' ' Text.Whitespace
+'addq' Name.Function
+' ' Text.Whitespace
+'$0' Name.Constant
+',' Punctuation
+' ' Text.Whitespace
+'/* comments */' Comment.Multiline
+' ' Text.Whitespace
+'(' Punctuation
+'%rsp' Name.Variable
+')' Punctuation
+' ' Text.Whitespace
+'/*\n// comments\n*/' Comment.Multiline
+' ' Text.Whitespace
+'xorq' Name.Function
+' ' Text.Whitespace
+'%rax' Name.Variable
+',' Punctuation
+' ' Text.Whitespace
+'%rax' Name.Variable
+' ' Text.Whitespace
+'// comments\n' Comment.Single
diff --git a/tests/snippets/gdscript/test_comment.txt b/tests/snippets/gdscript/test_comment.txt
new file mode 100644
index 0000000..ee78bc5
--- /dev/null
+++ b/tests/snippets/gdscript/test_comment.txt
@@ -0,0 +1,6 @@
+---input---
+# Comment
+
+---tokens---
+'# Comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_export_array.txt b/tests/snippets/gdscript/test_export_array.txt
new file mode 100644
index 0000000..24a313a
--- /dev/null
+++ b/tests/snippets/gdscript/test_export_array.txt
@@ -0,0 +1,17 @@
+---input---
+export (Array, AudioStream) var streams
+
+---tokens---
+'export' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'Array' Name.Builtin.Type
+',' Punctuation
+' ' Text.Whitespace
+'AudioStream' Name
+')' Punctuation
+' ' Text.Whitespace
+'var' Keyword
+' ' Text.Whitespace
+'streams' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_function_with_types.txt b/tests/snippets/gdscript/test_function_with_types.txt
new file mode 100644
index 0000000..eedf371
--- /dev/null
+++ b/tests/snippets/gdscript/test_function_with_types.txt
@@ -0,0 +1,33 @@
+---input---
+func abc(arg: String) -> void:
+ print("Hello", arg)
+
+---tokens---
+'func' Keyword
+' ' Text.Whitespace
+'abc' Name
+'(' Punctuation
+'arg' Name
+':' Punctuation
+' ' Text.Whitespace
+'String' Name.Builtin.Type
+')' Punctuation
+' ' Text.Whitespace
+'-' Operator
+'>' Operator
+' ' Text.Whitespace
+'void' Name.Builtin.Type
+':' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'print' Name.Builtin
+'(' Punctuation
+'"' Literal.String.Double
+'Hello' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+' ' Text.Whitespace
+'arg' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_inner_class.txt b/tests/snippets/gdscript/test_inner_class.txt
new file mode 100644
index 0000000..734242b
--- /dev/null
+++ b/tests/snippets/gdscript/test_inner_class.txt
@@ -0,0 +1,20 @@
+---input---
+class InnerClass:
+ var a = 5
+
+---tokens---
+'class' Keyword
+' ' Text.Whitespace
+'InnerClass' Name
+':' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'var' Keyword
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'5' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_multiline_string.txt b/tests/snippets/gdscript/test_multiline_string.txt
new file mode 100644
index 0000000..b098207
--- /dev/null
+++ b/tests/snippets/gdscript/test_multiline_string.txt
@@ -0,0 +1,8 @@
+---input---
+"""
+Multiline
+"""
+
+---tokens---
+'"""\nMultiline\n"""' Literal.String.Doc
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_signal.txt b/tests/snippets/gdscript/test_signal.txt
new file mode 100644
index 0000000..43aa8ec
--- /dev/null
+++ b/tests/snippets/gdscript/test_signal.txt
@@ -0,0 +1,15 @@
+---input---
+signal sig (arg1, arg2)
+
+---tokens---
+'signal' Keyword
+' ' Text.Whitespace
+'sig' Name
+' ' Text.Whitespace
+'(' Punctuation
+'arg1' Name
+',' Punctuation
+' ' Text.Whitespace
+'arg2' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_simple_function.txt b/tests/snippets/gdscript/test_simple_function.txt
new file mode 100644
index 0000000..2f444ef
--- /dev/null
+++ b/tests/snippets/gdscript/test_simple_function.txt
@@ -0,0 +1,22 @@
+---input---
+func abc(arg):
+ print("Hello, World!")
+
+---tokens---
+'func' Keyword
+' ' Text.Whitespace
+'abc' Name
+'(' Punctuation
+'arg' Name
+')' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'print' Name.Builtin
+'(' Punctuation
+'"' Literal.String.Double
+'Hello, World!' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/gdscript/test_variable_declaration_and_assigment.txt b/tests/snippets/gdscript/test_variable_declaration_and_assigment.txt
new file mode 100644
index 0000000..b2ee890
--- /dev/null
+++ b/tests/snippets/gdscript/test_variable_declaration_and_assigment.txt
@@ -0,0 +1,12 @@
+---input---
+var abc = 5.4
+
+---tokens---
+'var' Keyword
+' ' Text.Whitespace
+'abc' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'5.4' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/haskell/test_promoted_names.txt b/tests/snippets/haskell/test_promoted_names.txt
new file mode 100644
index 0000000..42d7755
--- /dev/null
+++ b/tests/snippets/haskell/test_promoted_names.txt
@@ -0,0 +1,10 @@
+---input---
+'x ': '[]
+
+---tokens---
+"'x" Name
+' ' Text.Whitespace
+"':" Keyword.Type
+' ' Text.Whitespace
+"'[]" Keyword.Type
+'\n' Text.Whitespace
diff --git a/tests/snippets/html/multiline-comment-catastrophic-backtracking.txt b/tests/snippets/html/multiline-comment-catastrophic-backtracking.txt
new file mode 100644
index 0000000..76f2fb9
--- /dev/null
+++ b/tests/snippets/html/multiline-comment-catastrophic-backtracking.txt
@@ -0,0 +1,34 @@
+---input---
+<!--
+this
+comment
+is
+never
+terminated
+...
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+...
+
+---tokens---
+'<' Error
+'!--\nthis\ncomment\nis\nnever\nterminated\n...\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n...\n' Text
diff --git a/tests/snippets/http/test_application_calendar_xml.txt b/tests/snippets/http/test_application_calendar_xml.txt
new file mode 100644
index 0000000..beb6386
--- /dev/null
+++ b/tests/snippets/http/test_application_calendar_xml.txt
@@ -0,0 +1,28 @@
+---input---
+GET / HTTP/1.0
+Content-Type: application/calendar+xml
+
+<foo>
+
+---tokens---
+'GET' Name.Function
+' ' Text
+'/' Name.Namespace
+' ' Text
+'HTTP' Keyword.Reserved
+'/' Operator
+'1.0' Literal.Number
+'\n' Text
+
+'Content-Type' Name.Attribute
+'' Text
+':' Operator
+' ' Text
+'application/calendar+xml' Literal
+'\n' Text
+
+'\n' Text
+
+'<foo' Name.Tag
+'>' Name.Tag
+'\n' Text.Whitespace
diff --git a/tests/snippets/http/test_application_xml.txt b/tests/snippets/http/test_application_xml.txt
new file mode 100644
index 0000000..97b2943
--- /dev/null
+++ b/tests/snippets/http/test_application_xml.txt
@@ -0,0 +1,28 @@
+---input---
+GET / HTTP/1.0
+Content-Type: application/xml
+
+<foo>
+
+---tokens---
+'GET' Name.Function
+' ' Text
+'/' Name.Namespace
+' ' Text
+'HTTP' Keyword.Reserved
+'/' Operator
+'1.0' Literal.Number
+'\n' Text
+
+'Content-Type' Name.Attribute
+'' Text
+':' Operator
+' ' Text
+'application/xml' Literal
+'\n' Text
+
+'\n' Text
+
+'<foo' Name.Tag
+'>' Name.Tag
+'\n' Text.Whitespace
diff --git a/tests/snippets/http/test_http_status_line.txt b/tests/snippets/http/test_http_status_line.txt
new file mode 100644
index 0000000..8f8449d
--- /dev/null
+++ b/tests/snippets/http/test_http_status_line.txt
@@ -0,0 +1,12 @@
+---input---
+HTTP/1.1 200 OK
+
+---tokens---
+'HTTP' Keyword.Reserved
+'/' Operator
+'1.1' Literal.Number
+' ' Text
+'200' Literal.Number
+' ' Text
+'OK' Name.Exception
+'\n' Text
diff --git a/tests/snippets/http/test_http_status_line_without_reason_phrase.txt b/tests/snippets/http/test_http_status_line_without_reason_phrase.txt
new file mode 100644
index 0000000..91bfa0e
--- /dev/null
+++ b/tests/snippets/http/test_http_status_line_without_reason_phrase.txt
@@ -0,0 +1,10 @@
+---input---
+HTTP/1.1 200
+
+---tokens---
+'HTTP' Keyword.Reserved
+'/' Operator
+'1.1' Literal.Number
+' ' Text
+'200' Literal.Number
+'\n' Text
diff --git a/tests/snippets/http/test_http_status_line_without_reason_phrase_rfc_7230.txt b/tests/snippets/http/test_http_status_line_without_reason_phrase_rfc_7230.txt
new file mode 100644
index 0000000..e0c9896
--- /dev/null
+++ b/tests/snippets/http/test_http_status_line_without_reason_phrase_rfc_7230.txt
@@ -0,0 +1,11 @@
+---input---
+HTTP/1.1 200
+
+---tokens---
+'HTTP' Keyword.Reserved
+'/' Operator
+'1.1' Literal.Number
+' ' Text
+'200' Literal.Number
+' ' Text
+'\n' Text
diff --git a/tests/snippets/idris/test_compiler_directive.txt b/tests/snippets/idris/test_compiler_directive.txt
new file mode 100644
index 0000000..68e18cb
--- /dev/null
+++ b/tests/snippets/idris/test_compiler_directive.txt
@@ -0,0 +1,20 @@
+---input---
+%link C "object.o"
+%name Vect xs
+
+---tokens---
+'%link' Keyword.Reserved
+' ' Text.Whitespace
+'C' Keyword.Type
+' ' Text.Whitespace
+'"' Literal.String
+'object.o' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'%name' Keyword.Reserved
+' ' Text.Whitespace
+'Vect' Keyword.Type
+' ' Text.Whitespace
+'xs' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/idris/test_reserved_word.txt b/tests/snippets/idris/test_reserved_word.txt
new file mode 100644
index 0000000..636bc16
--- /dev/null
+++ b/tests/snippets/idris/test_reserved_word.txt
@@ -0,0 +1,29 @@
+---input---
+namespace Foobar
+ links : String
+ links = "abc"
+
+---tokens---
+'namespace' Keyword.Reserved
+' ' Text.Whitespace
+'Foobar' Keyword.Type
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'links' Name.Function
+' ' Text.Whitespace
+':' Operator.Word
+' ' Text.Whitespace
+'String' Keyword.Type
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+' ' Text.Whitespace
+'links' Text
+' ' Text.Whitespace
+'=' Operator.Word
+' ' Text.Whitespace
+'"' Literal.String
+'abc' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/ini/test_indented_entries_1.txt b/tests/snippets/ini/test_indented_entries_1.txt
new file mode 100644
index 0000000..a878176
--- /dev/null
+++ b/tests/snippets/ini/test_indented_entries_1.txt
@@ -0,0 +1,16 @@
+---input---
+[section]
+ key1=value1
+ key2=value2
+
+---tokens---
+'[section]' Keyword
+'\n ' Text.Whitespace
+'key1' Name.Attribute
+'=' Operator
+'value1' Literal.String
+'\n ' Text.Whitespace
+'key2' Name.Attribute
+'=' Operator
+'value2' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/ini/test_indented_entries_2.txt b/tests/snippets/ini/test_indented_entries_2.txt
new file mode 100644
index 0000000..bd7c882
--- /dev/null
+++ b/tests/snippets/ini/test_indented_entries_2.txt
@@ -0,0 +1,20 @@
+---input---
+[section]
+ key1 = value1
+ key2 = value2
+
+---tokens---
+'[section]' Keyword
+'\n ' Text.Whitespace
+'key1' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'value1' Literal.String
+'\n ' Text.Whitespace
+'key2' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'value2' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/ini/test_indented_entries_3.txt b/tests/snippets/ini/test_indented_entries_3.txt
new file mode 100644
index 0000000..4a228af
--- /dev/null
+++ b/tests/snippets/ini/test_indented_entries_3.txt
@@ -0,0 +1,20 @@
+---input---
+[section]
+ key 1 = value1
+ key 2 = value2
+
+---tokens---
+'[section]' Keyword
+'\n ' Text.Whitespace
+'key 1' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'value1' Literal.String
+'\n ' Text.Whitespace
+'key 2' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'value2' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/j/test_deal_operator.txt b/tests/snippets/j/test_deal_operator.txt
new file mode 100644
index 0000000..0156b6d
--- /dev/null
+++ b/tests/snippets/j/test_deal_operator.txt
@@ -0,0 +1,8 @@
+---input---
+3?10
+
+---tokens---
+'3' Literal.Number.Integer
+'?' Operator
+'10' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/j/test_deal_operator_fixed_seed.txt b/tests/snippets/j/test_deal_operator_fixed_seed.txt
new file mode 100644
index 0000000..0a0bd77
--- /dev/null
+++ b/tests/snippets/j/test_deal_operator_fixed_seed.txt
@@ -0,0 +1,9 @@
+---input---
+3?.10
+
+---tokens---
+'3' Literal.Number.Integer
+'?' Operator
+'.' Operator
+'10' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_default.txt b/tests/snippets/java/test_default.txt
new file mode 100644
index 0000000..f24fa42
--- /dev/null
+++ b/tests/snippets/java/test_default.txt
@@ -0,0 +1,36 @@
+---input---
+switch (x) {
+ case 1: break;
+ default: break;
+}
+
+---tokens---
+'switch' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'x' Name
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'case' Keyword
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+':' Punctuation
+' ' Text.Whitespace
+'break' Keyword
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'default' Keyword
+':' Punctuation
+' ' Text.Whitespace
+'break' Keyword
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_enhanced_for.txt b/tests/snippets/java/test_enhanced_for.txt
new file mode 100644
index 0000000..d2a8091
--- /dev/null
+++ b/tests/snippets/java/test_enhanced_for.txt
@@ -0,0 +1,22 @@
+---input---
+label:
+for(String var2: var1) {}
+
+---tokens---
+'label' Name.Label
+':' Punctuation
+'\n' Text.Whitespace
+
+'for' Keyword
+'(' Punctuation
+'String' Name
+' ' Text.Whitespace
+'var2' Name
+':' Punctuation
+' ' Text.Whitespace
+'var1' Name
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_multiline_string.txt b/tests/snippets/java/test_multiline_string.txt
new file mode 100644
index 0000000..c7325b5
--- /dev/null
+++ b/tests/snippets/java/test_multiline_string.txt
@@ -0,0 +1,185 @@
+---input---
+public class Quine {
+ public static void main(String[] args) {
+ String textBlockQuotes = new String(new char[]{'"', '"', '"'});
+ char newLine = 10;
+ String teststring = "test123\n";
+ String source = """
+public class Quine {
+ public static void main(String[] args) {
+ String textBlockQuotes = new String(new char[]{'"', '"', '"'});
+ char newLine = 10;
+ String teststringinside = "hello my name is...\n\r";
+ String source = %s;
+ System.out.println(source.formatted(textBlockQuotes + newLine + source + textBlockQuotes));
+ }
+}
+""";
+ System.out.println(source.formatted(textBlockQuotes + newLine + source + textBlockQuotes));
+ String teststring2 = "Hello\n";
+ }
+}
+
+---tokens---
+'public' Keyword.Declaration
+' ' Text.Whitespace
+'class' Keyword.Declaration
+' ' Text
+'Quine' Name.Class
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'public' Keyword.Declaration
+' ' Text.Whitespace
+'static' Keyword.Declaration
+' ' Text.Whitespace
+'void' Keyword.Type
+' ' Text.Whitespace
+'main' Name.Function
+'(' Punctuation
+'String' Name
+'[' Operator
+']' Operator
+' ' Text.Whitespace
+'args' Name
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'\t\t' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'textBlockQuotes' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'new' Keyword
+' ' Text.Whitespace
+'String' Name
+'(' Punctuation
+'new' Keyword
+' ' Text.Whitespace
+'char' Keyword.Type
+'[' Operator
+']' Operator
+'{' Punctuation
+'\'"\'' Literal.String.Char
+',' Punctuation
+' ' Text.Whitespace
+'\'"\'' Literal.String.Char
+',' Punctuation
+' ' Text.Whitespace
+'\'"\'' Literal.String.Char
+'}' Punctuation
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\t\t' Text.Whitespace
+'char' Keyword.Type
+' ' Text.Whitespace
+'newLine' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'10' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
+
+'\t\t' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'teststring' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"' Literal.String
+'test123' Literal.String
+'\\' Literal.String
+'n' Literal.String
+'"' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+'\t ' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'source' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"""\n' Literal.String
+
+"public class Quine {\n\tpublic static void main(String[] args) {\n\t\tString textBlockQuotes = new String(new char[]{'" Literal.String
+'"' Literal.String
+"', '" Literal.String
+'"' Literal.String
+"', '" Literal.String
+'"' Literal.String
+"'});\n\t\tchar newLine = 10;\n\t\tString teststringinside = " Literal.String
+'"' Literal.String
+'hello my name is...' Literal.String
+'\\' Literal.String
+'n' Literal.String
+'\\' Literal.String
+'r' Literal.String
+'"' Literal.String
+';\n\t\tString source = %s;\n\t\tSystem.out.println(source.formatted(textBlockQuotes + newLine + source + textBlockQuotes));\n\t}\n}\n' Literal.String
+
+'"""' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'System' Name
+'.' Punctuation
+'out' Name.Attribute
+'.' Punctuation
+'println' Name.Attribute
+'(' Punctuation
+'source' Name
+'.' Punctuation
+'formatted' Name.Attribute
+'(' Punctuation
+'textBlockQuotes' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'newLine' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'source' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'textBlockQuotes' Name
+')' Punctuation
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'teststring2' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"' Literal.String
+'Hello' Literal.String
+'\\' Literal.String
+'n' Literal.String
+'"' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+'\t' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_multiline_string_only.txt b/tests/snippets/java/test_multiline_string_only.txt
new file mode 100644
index 0000000..09d54ad
--- /dev/null
+++ b/tests/snippets/java/test_multiline_string_only.txt
@@ -0,0 +1,46 @@
+---input---
+ String source = """
+public class Quine {
+ public static void main(String[] args) {
+ String textBlockQuotes = new String(new char[]{'"', '"', '"'});
+ char newLine = 10;
+ String teststringinside = "hello my name is...\n\r";
+ String source = %s;
+ System.out.println(source.formatted(textBlockQuotes + newLine + source + textBlockQuotes));
+ }
+}
+""";
+
+
+---tokens---
+'\t ' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'source' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"""\n' Literal.String
+
+"public class Quine {\n\tpublic static void main(String[] args) {\n\t\tString textBlockQuotes = new String(new char[]{'" Literal.String
+'"' Literal.String
+"', '" Literal.String
+'"' Literal.String
+"', '" Literal.String
+'"' Literal.String
+"'});\n\t\tchar newLine = 10;\n\t\tString teststringinside = " Literal.String
+'"' Literal.String
+'hello my name is...' Literal.String
+'\\' Literal.String
+'n' Literal.String
+'\\' Literal.String
+'r' Literal.String
+'"' Literal.String
+';\n\t\tString source = %s;\n\t\tSystem.out.println(source.formatted(textBlockQuotes + newLine + source + textBlockQuotes));\n\t}\n}\n' Literal.String
+
+'"""' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_numeric_literals.txt b/tests/snippets/java/test_numeric_literals.txt
new file mode 100644
index 0000000..1dc933d
--- /dev/null
+++ b/tests/snippets/java/test_numeric_literals.txt
@@ -0,0 +1,34 @@
+---input---
+0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D
+
+---tokens---
+'0' Literal.Number.Integer
+' ' Text.Whitespace
+'5L' Literal.Number.Integer
+' ' Text.Whitespace
+'9__542_72l' Literal.Number.Integer
+' ' Text.Whitespace
+'0xbEEf' Literal.Number.Hex
+' ' Text.Whitespace
+'0X9_A' Literal.Number.Hex
+' ' Text.Whitespace
+'0_35' Literal.Number.Oct
+' ' Text.Whitespace
+'01' Literal.Number.Oct
+' ' Text.Whitespace
+'0b0___101_0' Literal.Number.Bin
+' ' Text.Whitespace
+'0.' Literal.Number.Float
+' ' Text.Whitespace
+'.7_17F' Literal.Number.Float
+' ' Text.Whitespace
+'3e-1_3d' Literal.Number.Float
+' ' Text.Whitespace
+'1f' Literal.Number.Float
+' ' Text.Whitespace
+'6_01.9e+3' Literal.Number.Float
+' ' Text.Whitespace
+'0x.1Fp3' Literal.Number.Float
+' ' Text.Whitespace
+'0XEP8D' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/java/test_record.txt b/tests/snippets/java/test_record.txt
new file mode 100644
index 0000000..f4ca08b
--- /dev/null
+++ b/tests/snippets/java/test_record.txt
@@ -0,0 +1,67 @@
+---input---
+public record RecordTest() {}
+public static record RecordTest() {}
+record Person(String firstName, String lastName) {}
+String[] record = csvReader.getValues();
+
+
+---tokens---
+'public' Keyword.Declaration
+' ' Text.Whitespace
+'record' Keyword.Declaration
+' ' Text
+'RecordTest' Name.Class
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
+
+'public' Keyword.Declaration
+' ' Text.Whitespace
+'static' Keyword.Declaration
+' ' Text.Whitespace
+'record' Keyword.Declaration
+' ' Text
+'RecordTest' Name.Class
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
+
+'record' Keyword.Declaration
+' ' Text
+'Person' Name.Class
+'(' Punctuation
+'String' Name
+' ' Text.Whitespace
+'firstName' Name
+',' Punctuation
+' ' Text.Whitespace
+'String' Name
+' ' Text.Whitespace
+'lastName' Name
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
+
+'String' Name
+'[' Operator
+']' Operator
+' ' Text.Whitespace
+'record' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'csvReader' Name
+'.' Punctuation
+'getValues' Name.Attribute
+'(' Punctuation
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/js/super.txt b/tests/snippets/js/super.txt
new file mode 100644
index 0000000..1af2ad2
--- /dev/null
+++ b/tests/snippets/js/super.txt
@@ -0,0 +1,72 @@
+---input---
+super(member1, member2)
+
+super(member1,member2)
+
+super(member1,
+member2)
+
+super (member1, member2)
+
+super (member1,member2)
+
+super (member1,
+member2)
+
+---tokens---
+'super' Keyword
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+' ' Text.Whitespace
+'member2' Name.Other
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'super' Keyword
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+'member2' Name.Other
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'super' Keyword
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+'\n' Text.Whitespace
+
+'member2' Name.Other
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'super' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+' ' Text.Whitespace
+'member2' Name.Other
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'super' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+'member2' Name.Other
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'super' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'member1' Name.Other
+',' Punctuation
+'\n' Text.Whitespace
+
+'member2' Name.Other
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/jslt/test_sample.txt b/tests/snippets/jslt/test_sample.txt
new file mode 100644
index 0000000..73ad3fd
--- /dev/null
+++ b/tests/snippets/jslt/test_sample.txt
@@ -0,0 +1,83 @@
+---input---
+import "transforms/yellow.jslt" as yellow
+
+// Known valid types
+let valid-types = [ "SomeType" ]
+
+def foobar(arg) $arg.foobar
+
+{
+ "foobar": foobar(.),
+ "is-valid": contains(.type, $valid-types),
+ *: .
+}
+
+---tokens---
+'import' Keyword.Namespace
+' ' Text.Whitespace
+'"transforms/yellow.jslt"' Literal.String.Symbol
+' ' Text.Whitespace
+'as' Keyword.Namespace
+' ' Text.Whitespace
+'yellow' Name.Namespace
+'\n\n' Text.Whitespace
+
+'// Known valid types\n' Comment.Single
+
+'let' Keyword.Declaration
+' ' Text.Whitespace
+'valid-types' Name.Variable
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+' ' Text.Whitespace
+'"SomeType"' Literal.String.Double
+' ' Text.Whitespace
+']' Punctuation
+'\n\n' Text.Whitespace
+
+'def' Keyword.Declaration
+' ' Text.Whitespace
+'foobar' Name.Function
+'(' Punctuation
+'arg' Name.Variable
+')' Punctuation
+' ' Text.Whitespace
+'$arg' Name.Variable
+'.' Operator
+'foobar' Name
+'\n\n' Text.Whitespace
+
+'{' Punctuation
+'\n ' Text.Whitespace
+'"foobar"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'foobar' Name
+'(' Punctuation
+'.' Operator
+')' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'"is-valid"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'contains' Name.Builtin
+'(' Punctuation
+'.' Operator
+'type' Name
+',' Punctuation
+' ' Text.Whitespace
+'$valid-types' Name.Variable
+')' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'*' Operator
+':' Punctuation
+' ' Text.Whitespace
+'.' Operator
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/json/test_basic.txt b/tests/snippets/json/test_basic.txt
new file mode 100644
index 0000000..f93b91b
--- /dev/null
+++ b/tests/snippets/json/test_basic.txt
@@ -0,0 +1,30 @@
+---input---
+{"foo": "bar", "foo2": [1, 2, 3], "\u0123": "\u0123"}
+
+---tokens---
+'{' Punctuation
+'"foo"' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'"bar"' Literal.String.Double
+',' Punctuation
+' ' Text.Whitespace
+'"foo2"' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'3' Literal.Number.Integer
+'],' Punctuation
+' ' Text.Whitespace
+'"\\u0123"' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'"\\u0123"' Literal.String.Double
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/json/test_basic_bare.txt b/tests/snippets/json/test_basic_bare.txt
new file mode 100644
index 0000000..785d264
--- /dev/null
+++ b/tests/snippets/json/test_basic_bare.txt
@@ -0,0 +1,23 @@
+---input---
+"foo": "bar", "foo2": [1, 2, 3]
+
+---tokens---
+'"foo"' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'"bar"' Literal.String.Double
+',' Punctuation
+' ' Text.Whitespace
+'"foo2"' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'3' Literal.Number.Integer
+']' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia-repl/test_repl.txt b/tests/snippets/julia-repl/test_repl.txt
new file mode 100644
index 0000000..183de17
--- /dev/null
+++ b/tests/snippets/julia-repl/test_repl.txt
@@ -0,0 +1,51 @@
+Tests separating Julia's commands from output in REPL-like code blocks
+
+---input---
+julia> f(x) = sin(π/2x)
+f (generic function with 1 method)
+
+julia> @. f(1:2)
+2-element Vector{Float64}:
+ 1.0
+ 0.7071067811865475
+
+---tokens---
+'julia>' Generic.Prompt
+' ' Text.Whitespace
+'f' Name
+'(' Punctuation
+'x' Name
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'sin' Name
+'(' Punctuation
+'π' Name.Builtin
+'/' Operator
+'2' Literal.Number.Integer
+'x' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'f (generic function with 1 method)\n' Generic.Output
+
+'\n' Generic.Output
+
+'julia>' Generic.Prompt
+' ' Text.Whitespace
+'@.' Name.Decorator
+' ' Text.Whitespace
+'f' Name
+'(' Punctuation
+'1' Literal.Number.Integer
+':' Operator
+'2' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
+
+'2-element Vector{Float64}:\n' Generic.Output
+
+' 1.0\n' Generic.Output
+
+' 0.7071067811865475\n' Generic.Output
diff --git a/tests/snippets/julia/test_keywords.txt b/tests/snippets/julia/test_keywords.txt
new file mode 100644
index 0000000..10166d6
--- /dev/null
+++ b/tests/snippets/julia/test_keywords.txt
@@ -0,0 +1,101 @@
+# Test keywords are identified
+
+---input---
+mutable struct MutableType end
+struct ImmutableType end
+abstract type AbstractMyType end
+primitive type MyPrimitive 32 end
+(abstract, mutable, type) = true, π, missing
+
+abstract type AbstractMyType end
+primitive type MyPrimitive 32 end
+mutable struct MutableType end
+
+---tokens---
+'mutable' Keyword
+' ' Text.Whitespace
+'struct' Keyword
+' ' Text
+'MutableType' Keyword.Type
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'struct' Keyword
+' ' Text
+'ImmutableType' Keyword.Type
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'abstract' Keyword
+' ' Text.Whitespace
+'type' Keyword
+' ' Text
+'AbstractMyType' Keyword.Type
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'primitive' Keyword
+' ' Text.Whitespace
+'type' Keyword
+' ' Text
+'MyPrimitive' Keyword.Type
+' ' Text.Whitespace
+'32' Literal.Number.Integer
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'(' Punctuation
+'abstract' Name
+',' Punctuation
+' ' Text.Whitespace
+'mutable' Name
+',' Punctuation
+' ' Text.Whitespace
+'type' Name
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'true' Name.Builtin
+',' Punctuation
+' ' Text.Whitespace
+'π' Name.Builtin
+',' Punctuation
+' ' Text.Whitespace
+'missing' Name.Builtin
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'abstract' Keyword
+' ' Text.Whitespace
+'type' Keyword
+' ' Text
+'AbstractMyType' Keyword.Type
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'primitive' Keyword
+' \t' Text.Whitespace
+'type' Keyword
+' ' Text
+'MyPrimitive' Keyword.Type
+' ' Text.Whitespace
+'32' Literal.Number.Integer
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'mutable' Keyword
+' ' Text.Whitespace
+'struct' Keyword
+' ' Text
+'MutableType' Keyword.Type
+' ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_macros.txt b/tests/snippets/julia/test_macros.txt
new file mode 100644
index 0000000..43d67e0
--- /dev/null
+++ b/tests/snippets/julia/test_macros.txt
@@ -0,0 +1,56 @@
+# Test that macros are parsed, including ones which are defined as symbols
+
+---input---
+@generated function
+@. a + b
+@~ a + b
+@± a + b
+@mymacro(a, b)
+@+¹ᵀ a
+
+---tokens---
+'@generated' Name.Decorator
+' ' Text.Whitespace
+'function' Keyword
+'\n' Text.Whitespace
+
+'@.' Name.Decorator
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
+
+'@~' Name.Decorator
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
+
+'@±' Name.Decorator
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
+
+'@mymacro' Name.Decorator
+'(' Punctuation
+'a' Name
+',' Punctuation
+' ' Text.Whitespace
+'b' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'@+¹ᵀ' Name.Decorator
+' ' Text.Whitespace
+'a' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_names.txt b/tests/snippets/julia/test_names.txt
new file mode 100644
index 0000000..62c0b55
--- /dev/null
+++ b/tests/snippets/julia/test_names.txt
@@ -0,0 +1,148 @@
+# Test that the range of Julia variable names are correctly identified
+
+---input---
+a # single character variable
+a_simple_name
+_leading_underscore
+5implicit_mul
+6_more_mul
+nums1
+nums_2
+nameswith!
+multiple!!
+embedded!_inthemiddle
+embed!1
+prime_suffix′
+for_each # starts with keyword substring
+
+# variables with characters > \u00A1
+ð # category Ll
+Aʺ # category Lm -- \U02BA (MODIFIER LETTER DOUBLE PRIME), not \U2033 (DOUBLE PRIME)
+א # category Lo
+Ð # category Lu
+A̅ # category Mn -- \U0305 (COMBINING OVERLINE)
+ⅿ # category Nl -- \U217F (SMALL ROMAN NUMERAL ONE THOUSAND)
+A₁ # category No
+A² # category No
+€ # category Sc
+© # category So
+
+# number-like names
+𝟙 # category Nd
+𝟏 # category Nd
+
+---tokens---
+'a' Name
+' ' Text.Whitespace
+'# single character variable' Comment
+'\n' Text.Whitespace
+
+'a_simple_name' Name
+'\n' Text.Whitespace
+
+'_leading_underscore' Name
+'\n' Text.Whitespace
+
+'5' Literal.Number.Integer
+'implicit_mul' Name
+'\n' Text.Whitespace
+
+'6' Literal.Number.Integer
+'_more_mul' Name
+'\n' Text.Whitespace
+
+'nums1' Name
+'\n' Text.Whitespace
+
+'nums_2' Name
+'\n' Text.Whitespace
+
+'nameswith!' Name
+'\n' Text.Whitespace
+
+'multiple!!' Name
+'\n' Text.Whitespace
+
+'embedded!_inthemiddle' Name
+'\n' Text.Whitespace
+
+'embed!1' Name
+'\n' Text.Whitespace
+
+'prime_suffix′' Name
+'\n' Text.Whitespace
+
+'for_each' Name
+' ' Text.Whitespace
+'# starts with keyword substring' Comment
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# variables with characters > \\u00A1' Comment
+'\n' Text.Whitespace
+
+'ð' Name
+' ' Text.Whitespace
+'# category Ll' Comment
+'\n' Text.Whitespace
+
+'Aʺ' Name
+' ' Text.Whitespace
+'# category Lm -- \\U02BA (MODIFIER LETTER DOUBLE PRIME), not \\U2033 (DOUBLE PRIME)' Comment
+'\n' Text.Whitespace
+
+'א' Name
+' ' Text.Whitespace
+'# category Lo' Comment
+'\n' Text.Whitespace
+
+'Ð' Name
+' ' Text.Whitespace
+'# category Lu' Comment
+'\n' Text.Whitespace
+
+'A̅' Name
+' ' Text.Whitespace
+'# category Mn -- \\U0305 (COMBINING OVERLINE)' Comment
+'\n' Text.Whitespace
+
+'ⅿ' Name
+' ' Text.Whitespace
+'# category Nl -- \\U217F (SMALL ROMAN NUMERAL ONE THOUSAND)' Comment
+'\n' Text.Whitespace
+
+'A₁' Name
+' ' Text.Whitespace
+'# category No' Comment
+'\n' Text.Whitespace
+
+'A²' Name
+' ' Text.Whitespace
+'# category No' Comment
+'\n' Text.Whitespace
+
+'€' Name
+' ' Text.Whitespace
+'# category Sc' Comment
+'\n' Text.Whitespace
+
+'©' Name
+' ' Text.Whitespace
+'# category So' Comment
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# number-like names' Comment
+'\n' Text.Whitespace
+
+'𝟙' Name
+' ' Text.Whitespace
+'# category Nd' Comment
+'\n' Text.Whitespace
+
+'𝟏' Name
+' ' Text.Whitespace
+'# category Nd' Comment
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_numbers.txt b/tests/snippets/julia/test_numbers.txt
new file mode 100644
index 0000000..66f1fd0
--- /dev/null
+++ b/tests/snippets/julia/test_numbers.txt
@@ -0,0 +1,261 @@
+# Tests identification of number forms
+
+---input---
+# floats
+ 1e1 1e+1 1e-1
+1.1e1 1.1e+1 1.1e-1 .1e1 .1_1e1 1_1.1e1 1.1_1e1 1.1_11e1
+1.1E1 1.1E+1 1.1E-1 .1E1 .1_1E1 1_1.1E1 1.1_1E1 1.1_11E1
+1.1f1 1.1f+1 1.1f-1 .1f1 .1_1f1 1_1.1f1 1.1_1f1 1.1_11f1
+1E1 1E+1 1E-1
+1f1 1f+1 1f-1
+.1 1. 1.1 1.1_1 1.1_11 .1_1 .1_11 1_1.1_1
+# hex floats
+0x1p1 0xa_bp10 0x01_ap11 0x01_abp1
+0x1.1p1 0xA.Bp10 0x0.1_Ap9 0x0_1.Ap1 0x0_1.A_Bp9
+
+# integers
+1 01 10_1 10_11
+
+# non-decimal
+0xf 0xf_0 0xfff_000
+0o7 0o7_0 0o777_000
+0b1 0b1_0 0b111_000
+
+# invalid in Julia - out of range values
+0xg 0o8 0b2 0x1pA
+# invalid in Julia - no trailing underscores
+1_ 1.1_ 0xf_ 0o7_ 0b1_ 0xF_p1
+# parsed as juxtaposed numeral + variable in Julia (no underscores in exponents)
+1e1_1 1E1_1 1f1_1 0xfp1_1
+
+# not floats -- range-like expression parts
+1..1 ..1 1..
+
+---tokens---
+'# floats' Comment
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'1e1' Literal.Number.Float
+' ' Text.Whitespace
+'1e+1' Literal.Number.Float
+' ' Text.Whitespace
+'1e-1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.1e1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1e+1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1e-1' Literal.Number.Float
+' ' Text.Whitespace
+'.1e1' Literal.Number.Float
+' ' Text.Whitespace
+'.1_1e1' Literal.Number.Float
+' ' Text.Whitespace
+'1_1.1e1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_1e1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_11e1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.1E1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1E+1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1E-1' Literal.Number.Float
+' ' Text.Whitespace
+'.1E1' Literal.Number.Float
+' ' Text.Whitespace
+'.1_1E1' Literal.Number.Float
+' ' Text.Whitespace
+'1_1.1E1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_1E1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_11E1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.1f1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1f+1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1f-1' Literal.Number.Float
+' ' Text.Whitespace
+'.1f1' Literal.Number.Float
+' ' Text.Whitespace
+'.1_1f1' Literal.Number.Float
+' ' Text.Whitespace
+'1_1.1f1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_1f1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_11f1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1E1' Literal.Number.Float
+' ' Text.Whitespace
+'1E+1' Literal.Number.Float
+' ' Text.Whitespace
+'1E-1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1f1' Literal.Number.Float
+' ' Text.Whitespace
+'1f+1' Literal.Number.Float
+' ' Text.Whitespace
+'1f-1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'.1' Literal.Number.Float
+' ' Text.Whitespace
+'1.' Literal.Number.Float
+' ' Text.Whitespace
+'1.1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_1' Literal.Number.Float
+' ' Text.Whitespace
+'1.1_11' Literal.Number.Float
+' ' Text.Whitespace
+'.1_1' Literal.Number.Float
+' ' Text.Whitespace
+'.1_11' Literal.Number.Float
+' ' Text.Whitespace
+'1_1.1_1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'# hex floats' Comment
+'\n' Text.Whitespace
+
+'0x1p1' Literal.Number.Float
+' ' Text.Whitespace
+'0xa_bp10' Literal.Number.Float
+' ' Text.Whitespace
+'0x01_ap11' Literal.Number.Float
+' ' Text.Whitespace
+'0x01_abp1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'0x1.1p1' Literal.Number.Float
+' ' Text.Whitespace
+'0xA.Bp10' Literal.Number.Float
+' ' Text.Whitespace
+'0x0.1_Ap9' Literal.Number.Float
+' ' Text.Whitespace
+'0x0_1.Ap1' Literal.Number.Float
+' ' Text.Whitespace
+'0x0_1.A_Bp9' Literal.Number.Float
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# integers' Comment
+'\n' Text.Whitespace
+
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+'01' Literal.Number.Integer
+' ' Text.Whitespace
+'10_1' Literal.Number.Integer
+' ' Text.Whitespace
+'10_11' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# non-decimal' Comment
+'\n' Text.Whitespace
+
+'0xf' Literal.Number.Hex
+' ' Text.Whitespace
+'0xf_0' Literal.Number.Hex
+' ' Text.Whitespace
+'0xfff_000' Literal.Number.Hex
+'\n' Text.Whitespace
+
+'0o7' Literal.Number.Oct
+' ' Text.Whitespace
+'0o7_0' Literal.Number.Oct
+' ' Text.Whitespace
+'0o777_000' Literal.Number.Oct
+'\n' Text.Whitespace
+
+'0b1' Literal.Number.Bin
+' ' Text.Whitespace
+'0b1_0' Literal.Number.Bin
+' ' Text.Whitespace
+'0b111_000' Literal.Number.Bin
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# invalid in Julia - out of range values' Comment
+'\n' Text.Whitespace
+
+'0' Literal.Number.Integer
+'xg' Name
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+'o8' Name
+' ' Text.Whitespace
+'0' Literal.Number.Integer
+'b2' Name
+' ' Text.Whitespace
+'0x1' Literal.Number.Hex
+'pA' Name
+'\n' Text.Whitespace
+
+'# invalid in Julia - no trailing underscores' Comment
+'\n' Text.Whitespace
+
+'1' Literal.Number.Integer
+'_' Name
+' ' Text.Whitespace
+'1.1' Literal.Number.Float
+'_' Name
+' ' Text.Whitespace
+'0xf' Literal.Number.Hex
+'_' Name
+' ' Text.Whitespace
+'0o7' Literal.Number.Oct
+'_' Name
+' ' Text.Whitespace
+'0b1' Literal.Number.Bin
+'_' Name
+' ' Text.Whitespace
+'0xF' Literal.Number.Hex
+'_p1' Name
+'\n' Text.Whitespace
+
+'# parsed as juxtaposed numeral + variable in Julia (no underscores in exponents)' Comment
+'\n' Text.Whitespace
+
+'1e1' Literal.Number.Float
+'_1' Name
+' ' Text.Whitespace
+'1E1' Literal.Number.Float
+'_1' Name
+' ' Text.Whitespace
+'1f1' Literal.Number.Float
+'_1' Name
+' ' Text.Whitespace
+'0xfp1' Literal.Number.Float
+'_1' Name
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# not floats -- range-like expression parts' Comment
+'\n' Text.Whitespace
+
+'1' Literal.Number.Integer
+'..' Operator
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+'..' Operator
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'..' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_operators.txt b/tests/snippets/julia/test_operators.txt
new file mode 100644
index 0000000..c3c0f9f
--- /dev/null
+++ b/tests/snippets/julia/test_operators.txt
@@ -0,0 +1,172 @@
+# Test that operators --- dotted and unicode --- are identified correctly.
+
+---input---
+a += b.c
+a .÷= .~b.c
+a = !b ⋆ c!
+a = b ? c : d ⊕ e
+a = √(5)
+a -> (a...) .+ 1
+a \ b
+1..2
+a = a === b
+a <: T
+a >: T
+a::T
+[adjoint]'
+(identity)''
+adjoint'''
+transpose'ᵀ
+suffixed +¹ operator
+suffixed +¹²³ operator
+
+---tokens---
+'a' Name
+' ' Text.Whitespace
+'+=' Operator
+' ' Text.Whitespace
+'b' Name
+'.' Operator
+'c' Name
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'.÷=' Operator
+' ' Text.Whitespace
+'.~' Operator
+'b' Name
+'.' Operator
+'c' Name
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'!' Operator
+'b' Name
+' ' Text.Whitespace
+'⋆' Operator
+' ' Text.Whitespace
+'c!' Name
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'b' Name
+' ' Text.Whitespace
+'?' Operator
+' ' Text.Whitespace
+'c' Name
+' ' Text.Whitespace
+':' Operator
+' ' Text.Whitespace
+'d' Name
+' ' Text.Whitespace
+'⊕' Operator
+' ' Text.Whitespace
+'e' Name
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'√' Operator
+'(' Punctuation
+'5' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'->' Operator
+' ' Text.Whitespace
+'(' Punctuation
+'a' Name
+'...' Operator
+')' Punctuation
+' ' Text.Whitespace
+'.+' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'\\' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
+
+'1' Literal.Number.Integer
+'..' Operator
+'2' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'a' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'===' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
+
+'a' Keyword.Type
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'a' Keyword.Type
+' ' Text.Whitespace
+'>:' Operator
+' ' Text.Whitespace
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'a' Name
+'::' Operator
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'[' Punctuation
+'adjoint' Name
+']' Punctuation
+"'" Operator
+'\n' Text.Whitespace
+
+'(' Punctuation
+'identity' Name
+')' Punctuation
+"''" Operator
+'\n' Text.Whitespace
+
+'adjoint' Name
+"'''" Operator
+'\n' Text.Whitespace
+
+'transpose' Name
+"'ᵀ" Operator
+'\n' Text.Whitespace
+
+'suffixed' Name
+' ' Text.Whitespace
+'+¹' Operator
+' ' Text.Whitespace
+'operator' Name
+'\n' Text.Whitespace
+
+'suffixed' Name
+' ' Text.Whitespace
+'+¹²³' Operator
+' ' Text.Whitespace
+'operator' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_strings.txt b/tests/snippets/julia/test_strings.txt
new file mode 100644
index 0000000..d5f91bf
--- /dev/null
+++ b/tests/snippets/julia/test_strings.txt
@@ -0,0 +1,225 @@
+# Tests string forms
+
+---input---
+"global function"
+"An $interpolated variable"
+"An $(a + 1) expression"
+"""a"""
+"""
+global function
+de e f
+"inner string"
+"""
+raw"\\ a \" $interp $(1 + 1) \""
+raw"""
+"inner string"
+$interp
+$(1 + 1)
+"""
+# commented "string"
+
+@sprintf "%0.2f" var
+v"1.0"
+var"#nonstandard#"
+
+r"^[abs]+$"m
+arbi"trary"suff
+arbi"trary"1234
+
+`global function`
+`abc \` \$ $interpolated`
+`abc $(a + 1)`
+```a```
+```
+global function
+"thing" ` \$
+`now` $(now())
+```
+# commented `command`
+
+arbi`trary`suff
+arbi`trary`1234
+
+---tokens---
+'"' Literal.String
+'global function' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'An ' Literal.String
+'$interpolated' Literal.String.Interpol
+' variable' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'An ' Literal.String
+'$' Literal.String.Interpol
+'(' Punctuation
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+')' Punctuation
+' expression' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'"""' Literal.String
+'a' Literal.String
+'"""' Literal.String
+'\n' Text.Whitespace
+
+'"""' Literal.String
+'\nglobal function\nde e f\n' Literal.String
+
+'"' Literal.String
+'inner string' Literal.String
+'"' Literal.String
+'\n' Literal.String
+
+'"""' Literal.String
+'\n' Text.Whitespace
+
+'raw' Literal.String.Affix
+'"' Literal.String
+'\\\\ a ' Literal.String
+'\\"' Literal.String.Escape
+' $interp $(1 + 1) ' Literal.String
+'\\"' Literal.String.Escape
+'"' Literal.String
+'\n' Text.Whitespace
+
+'raw' Literal.String.Affix
+'"""' Literal.String
+'\n"inner string"\n$interp\n$(1 + 1)\n' Literal.String
+
+'"""' Literal.String
+'\n' Text.Whitespace
+
+'# commented "string"' Comment
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'@sprintf' Name.Decorator
+' ' Text.Whitespace
+'"' Literal.String
+'%0.2f' Literal.String.Interpol
+'"' Literal.String
+' ' Text.Whitespace
+'var' Name
+'\n' Text.Whitespace
+
+'v' Literal.String.Affix
+'"' Literal.String
+'1.0' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'var' Literal.String.Affix
+'"' Literal.String
+'#nonstandard#' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'r' Literal.String.Affix
+'"' Literal.String.Regex
+'^[abs]+$' Literal.String.Regex
+'"' Literal.String.Regex
+'m' Literal.String.Affix
+'\n' Text.Whitespace
+
+'arbi' Literal.String.Affix
+'"' Literal.String
+'trary' Literal.String
+'"' Literal.String
+'suff' Literal.String.Affix
+'\n' Text.Whitespace
+
+'arbi' Literal.String.Affix
+'"' Literal.String
+'trary' Literal.String
+'"' Literal.String
+'1234' Literal.String.Affix
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'`' Literal.String.Backtick
+'global function' Literal.String.Backtick
+'`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'`' Literal.String.Backtick
+'abc ' Literal.String.Backtick
+'\\`' Literal.String.Escape
+' ' Literal.String.Backtick
+'\\$' Literal.String.Escape
+' ' Literal.String.Backtick
+'$interpolated' Literal.String.Interpol
+'`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'`' Literal.String.Backtick
+'abc ' Literal.String.Backtick
+'$' Literal.String.Interpol
+'(' Punctuation
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+')' Punctuation
+'`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'```' Literal.String.Backtick
+'a' Literal.String.Backtick
+'```' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'```' Literal.String.Backtick
+'\nglobal function\n"thing" ' Literal.String.Backtick
+'`' Literal.String.Backtick
+' ' Literal.String.Backtick
+'\\$' Literal.String.Escape
+'\n' Literal.String.Backtick
+
+'`' Literal.String.Backtick
+'now' Literal.String.Backtick
+'`' Literal.String.Backtick
+' ' Literal.String.Backtick
+'$' Literal.String.Interpol
+'(' Punctuation
+'now' Name
+'(' Punctuation
+')' Punctuation
+')' Punctuation
+'\n' Literal.String.Backtick
+
+'```' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'# commented `command`' Comment
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'arbi' Literal.String.Affix
+'`' Literal.String.Backtick
+'trary' Literal.String.Backtick
+'`' Literal.String.Backtick
+'suff' Literal.String.Affix
+'\n' Text.Whitespace
+
+'arbi' Literal.String.Affix
+'`' Literal.String.Backtick
+'trary' Literal.String.Backtick
+'`' Literal.String.Backtick
+'1234' Literal.String.Affix
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_symbols.txt b/tests/snippets/julia/test_symbols.txt
new file mode 100644
index 0000000..37b4f83
--- /dev/null
+++ b/tests/snippets/julia/test_symbols.txt
@@ -0,0 +1,78 @@
+# Tests that symbols are parsed as special literals
+
+---input---
+:abc_123
+:abc_def
+:α
+Val{:mysymbol}
+
+# non-symbols
+a:b
+1:b
+1.:b
+a::T
+a<:T
+a>:T
+UInt(1):UInt(2)
+
+---tokens---
+':abc_123' Literal.String.Symbol
+'\n' Text.Whitespace
+
+':abc_def' Literal.String.Symbol
+'\n' Text.Whitespace
+
+':α' Literal.String.Symbol
+'\n' Text.Whitespace
+
+'Val' Keyword.Type
+'{' Punctuation
+':mysymbol' Literal.String.Symbol
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'# non-symbols' Comment
+'\n' Text.Whitespace
+
+'a' Name
+':' Operator
+'b' Name
+'\n' Text.Whitespace
+
+'1' Literal.Number.Integer
+':' Operator
+'b' Name
+'\n' Text.Whitespace
+
+'1.' Literal.Number.Float
+':' Operator
+'b' Name
+'\n' Text.Whitespace
+
+'a' Name
+'::' Operator
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'a' Keyword.Type
+'<:' Operator
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'a' Keyword.Type
+'>:' Operator
+'T' Keyword.Type
+'\n' Text.Whitespace
+
+'UInt' Keyword.Type
+'(' Punctuation
+'1' Literal.Number.Integer
+')' Punctuation
+':' Operator
+'UInt' Keyword.Type
+'(' Punctuation
+'2' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_types.txt b/tests/snippets/julia/test_types.txt
new file mode 100644
index 0000000..16267fd
--- /dev/null
+++ b/tests/snippets/julia/test_types.txt
@@ -0,0 +1,196 @@
+# Tests identifying names which must be types from context
+
+---input---
+Union{}
+MyType{Nothing, Any}
+f(::Union{T,S}) where S where T = 1
+f(::T) where {T} = 1
+f(::Type{<:T}) = 1
+f(::AT) where AT <: AbstractArray{MyType,1} = 1
+f(::Val{:named}) = 1
+f(::typeof(sin)) = 1
+MyInt <: Integer
+Number >: MyInt
+AT{T,1} <: B
+B>:AT{T,1}
+A <: f(B)
+g(C) <: T
+
+---tokens---
+'Union' Keyword.Type
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
+
+'MyType' Keyword.Type
+'{' Punctuation
+'Nothing' Keyword.Type
+',' Punctuation
+' ' Text.Whitespace
+'Any' Keyword.Type
+'}' Punctuation
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'Union' Keyword.Type
+'{' Punctuation
+'T' Keyword.Type
+',' Punctuation
+'S' Keyword.Type
+'}' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'where' Keyword
+' ' Text.Whitespace
+'S' Keyword.Type
+' ' Text.Whitespace
+'where' Keyword
+' ' Text.Whitespace
+'T' Keyword.Type
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'T' Keyword.Type
+')' Punctuation
+' ' Text.Whitespace
+'where' Keyword
+' ' Text.Whitespace
+'{' Punctuation
+'T' Keyword.Type
+'}' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'Type' Keyword.Type
+'{' Punctuation
+'<:' Operator
+'T' Keyword.Type
+'}' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'AT' Keyword.Type
+')' Punctuation
+' ' Text.Whitespace
+'where' Keyword
+' ' Text.Whitespace
+'AT' Keyword.Type
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'AbstractArray' Keyword.Type
+'{' Punctuation
+'MyType' Keyword.Type
+',' Punctuation
+'1' Literal.Number.Integer
+'}' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'Val' Keyword.Type
+'{' Punctuation
+':named' Literal.String.Symbol
+'}' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'f' Name
+'(' Punctuation
+'::' Operator
+'typeof' Name
+'(' Punctuation
+'sin' Name
+')' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'MyInt' Keyword.Type
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'Integer' Keyword.Type
+'\n' Text.Whitespace
+
+'Number' Keyword.Type
+' ' Text.Whitespace
+'>:' Operator
+' ' Text.Whitespace
+'MyInt' Keyword.Type
+'\n' Text.Whitespace
+
+'AT' Keyword.Type
+'{' Punctuation
+'T' Keyword.Type
+',' Punctuation
+'1' Literal.Number.Integer
+'}' Punctuation
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'B' Keyword.Type
+'\n' Text.Whitespace
+
+'B' Keyword.Type
+'>:' Operator
+'AT' Keyword.Type
+'{' Punctuation
+'T' Keyword.Type
+',' Punctuation
+'1' Literal.Number.Integer
+'}' Punctuation
+'\n' Text.Whitespace
+
+'A' Keyword.Type
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'f' Name
+'(' Punctuation
+'B' Name
+')' Punctuation
+'\n' Text.Whitespace
+
+'g' Name
+'(' Punctuation
+'C' Name
+')' Punctuation
+' ' Text.Whitespace
+'<:' Operator
+' ' Text.Whitespace
+'T' Keyword.Type
+'\n' Text.Whitespace
diff --git a/tests/snippets/julia/test_unicode.txt b/tests/snippets/julia/test_unicode.txt
new file mode 100644
index 0000000..6b2508a
--- /dev/null
+++ b/tests/snippets/julia/test_unicode.txt
@@ -0,0 +1,37 @@
+# Test that unicode character, √, in an expression is recognized
+
+---input---
+s = √((1/n) * sum(count .^ 2) - mu .^2)
+
+---tokens---
+'s' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'√' Operator
+'(' Punctuation
+'(' Punctuation
+'1' Literal.Number.Integer
+'/' Operator
+'n' Name
+')' Punctuation
+' ' Text.Whitespace
+'*' Operator
+' ' Text.Whitespace
+'sum' Name
+'(' Punctuation
+'count' Name
+' ' Text.Whitespace
+'.^' Operator
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+')' Punctuation
+' ' Text.Whitespace
+'-' Operator
+' ' Text.Whitespace
+'mu' Name
+' ' Text.Whitespace
+'.^' Operator
+'2' Literal.Number.Integer
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_can_cope_generics_in_destructuring.txt b/tests/snippets/kotlin/test_can_cope_generics_in_destructuring.txt
new file mode 100644
index 0000000..8fc7f3d
--- /dev/null
+++ b/tests/snippets/kotlin/test_can_cope_generics_in_destructuring.txt
@@ -0,0 +1,27 @@
+---input---
+val (a: List<Something>, b: Set<Wobble>) =
+
+---tokens---
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'(' Punctuation
+'a' Name.Variable
+':' Punctuation
+' ' Text.Whitespace
+'List' Name
+'<' Operator
+'Something' Name
+'>' Operator
+',' Punctuation
+' ' Text.Whitespace
+'b' Name.Variable
+':' Punctuation
+' ' Text.Whitespace
+'Set' Name
+'<' Operator
+'Wobble' Name
+'>' Operator
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_can_cope_with_backtick_names_in_functions.txt b/tests/snippets/kotlin/test_can_cope_with_backtick_names_in_functions.txt
new file mode 100644
index 0000000..c89c0ef
--- /dev/null
+++ b/tests/snippets/kotlin/test_can_cope_with_backtick_names_in_functions.txt
@@ -0,0 +1,8 @@
+---input---
+fun `wo bble`
+
+---tokens---
+'fun' Keyword.Declaration
+' ' Text.Whitespace
+'`wo bble`' Name.Function
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_can_cope_with_commas_and_dashes_in_backtick_Names.txt b/tests/snippets/kotlin/test_can_cope_with_commas_and_dashes_in_backtick_Names.txt
new file mode 100644
index 0000000..7b2aed7
--- /dev/null
+++ b/tests/snippets/kotlin/test_can_cope_with_commas_and_dashes_in_backtick_Names.txt
@@ -0,0 +1,8 @@
+---input---
+fun `wo,-bble`
+
+---tokens---
+'fun' Keyword.Declaration
+' ' Text.Whitespace
+'`wo,-bble`' Name.Function
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_can_cope_with_destructuring.txt b/tests/snippets/kotlin/test_can_cope_with_destructuring.txt
new file mode 100644
index 0000000..3db6ed9
--- /dev/null
+++ b/tests/snippets/kotlin/test_can_cope_with_destructuring.txt
@@ -0,0 +1,16 @@
+---input---
+val (a, b) =
+
+---tokens---
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'(' Punctuation
+'a' Name.Variable
+',' Punctuation
+' ' Text.Whitespace
+'b' Name.Variable
+')' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_can_cope_with_generics.txt b/tests/snippets/kotlin/test_can_cope_with_generics.txt
new file mode 100644
index 0000000..09eb61f
--- /dev/null
+++ b/tests/snippets/kotlin/test_can_cope_with_generics.txt
@@ -0,0 +1,34 @@
+---input---
+inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {
+
+---tokens---
+'inline' Keyword.Declaration
+' ' Text.Whitespace
+'fun' Keyword.Declaration
+' ' Text.Whitespace
+'<' Operator
+'reified' Keyword
+' ' Text.Whitespace
+'T' Name
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'ContractState' Name
+'>' Operator
+' ' Text.Whitespace
+'VaultService' Name
+'.' Punctuation
+'queryBy' Name.Function
+'(' Punctuation
+')' Punctuation
+':' Punctuation
+' ' Text.Whitespace
+'Vault' Name
+'.' Punctuation
+'Page' Name.Attribute
+'<' Operator
+'T' Name
+'>' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_modifier_keyword.txt b/tests/snippets/kotlin/test_modifier_keyword.txt
new file mode 100644
index 0000000..730c0a5
--- /dev/null
+++ b/tests/snippets/kotlin/test_modifier_keyword.txt
@@ -0,0 +1,18 @@
+---input---
+data class A(val data: String)
+
+---tokens---
+'data' Keyword.Declaration
+' ' Text.Whitespace
+'class' Keyword.Declaration
+' ' Text.Whitespace
+'A' Name.Class
+'(' Punctuation
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'data' Name.Variable
+':' Punctuation
+' ' Text.Whitespace
+'String' Keyword.Type
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_should_cope_with_multiline_comments.txt b/tests/snippets/kotlin/test_should_cope_with_multiline_comments.txt
new file mode 100644
index 0000000..33ec204
--- /dev/null
+++ b/tests/snippets/kotlin/test_should_cope_with_multiline_comments.txt
@@ -0,0 +1,12 @@
+---input---
+"""
+this
+is
+a
+comment"""
+
+---tokens---
+'"""' Literal.String
+'\nthis\nis\na\ncomment' Literal.String
+'"""' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/kotlin/test_string_interpolation.txt b/tests/snippets/kotlin/test_string_interpolation.txt
new file mode 100644
index 0000000..04ea254
--- /dev/null
+++ b/tests/snippets/kotlin/test_string_interpolation.txt
@@ -0,0 +1,35 @@
+---input---
+val something = "something"
+"Here is $something"
+"Here is ${something.toUpperList()}"
+
+---tokens---
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'something' Name.Variable
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"' Literal.String
+'something' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'Here is ' Literal.String
+'$' Literal.String.Interpol
+'something' Name
+'"' Literal.String
+'\n' Text.Whitespace
+
+'"' Literal.String
+'Here is ' Literal.String
+'${' Literal.String.Interpol
+'something' Name
+'.' Punctuation
+'toUpperList' Name.Attribute
+'(' Punctuation
+')' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/less/test_single_line_comments.txt b/tests/snippets/less/test_single_line_comments.txt
new file mode 100644
index 0000000..a900644
--- /dev/null
+++ b/tests/snippets/less/test_single_line_comments.txt
@@ -0,0 +1,21 @@
+---input---
+.font-light {
+ font-weight: 100; // Comment
+}
+
+---tokens---
+'.' Punctuation
+'font-light' Name.Class
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'font-weight' Keyword
+':' Punctuation
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+';' Punctuation
+' ' Text.Whitespace
+'// Comment\n' Comment.Single
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/mason/test_handles_tags_correctly.txt b/tests/snippets/mason/test_handles_tags_correctly.txt
new file mode 100644
index 0000000..0eb215d
--- /dev/null
+++ b/tests/snippets/mason/test_handles_tags_correctly.txt
@@ -0,0 +1,69 @@
+---input---
+<%class>
+has 'foo';
+has 'bar' => (required => 1);
+has 'baz' => (isa => 'Int', default => 17);
+</%class>
+
+---tokens---
+'<%class>' Name.Tag
+'\n' Text.Whitespace
+
+'' Name
+'has' Name
+' ' Text.Whitespace
+"'foo'" Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+'' Name
+'has' Name
+' ' Text.Whitespace
+"'bar'" Literal.String
+' ' Text.Whitespace
+'=' Operator
+'>' Operator
+' ' Text.Whitespace
+'(' Punctuation
+'' Name
+'required' Name
+' ' Text.Whitespace
+'=' Operator
+'>' Operator
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'' Name
+'has' Name
+' ' Text.Whitespace
+"'baz'" Literal.String
+' ' Text.Whitespace
+'=' Operator
+'>' Operator
+' ' Text.Whitespace
+'(' Punctuation
+'' Name
+'isa' Name
+' ' Text.Whitespace
+'=' Operator
+'>' Operator
+' ' Text.Whitespace
+"'Int'" Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'' Name
+'default' Name
+' ' Text.Whitespace
+'=' Operator
+'>' Operator
+' ' Text.Whitespace
+'17' Literal.Number.Integer
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'</%class>' Name.Tag
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_classes_with_properties.txt b/tests/snippets/matlab/test_classes_with_properties.txt
new file mode 100644
index 0000000..7de838eb
--- /dev/null
+++ b/tests/snippets/matlab/test_classes_with_properties.txt
@@ -0,0 +1,105 @@
+---input---
+classdef Name < dynamicprops
+ properties
+ % i am a comment
+ name1
+ name2
+ end
+ properties (Constant = true, SetAccess = protected)
+ % i too am a comment
+ matrix = [0, 1, 2];
+ string = 'i am a string'
+ end
+ methods
+ % i am also a comment
+ function self = Name()
+ % i am a comment inside a constructor
+ end
+ end
+end
+
+---tokens---
+'classdef' Keyword
+' ' Text.Whitespace
+'Name' Name
+' ' Text.Whitespace
+'<' Operator
+' ' Text.Whitespace
+'dynamicprops' Keyword
+'\n ' Text.Whitespace
+'properties' Keyword
+'\n ' Text.Whitespace
+'% i am a comment' Comment
+'\n ' Text.Whitespace
+'name1' Name
+'\n ' Text.Whitespace
+'name2' Name
+'\n ' Text.Whitespace
+'end' Keyword
+'\n ' Text.Whitespace
+'properties' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'Constant' Name.Builtin
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'true' Keyword
+',' Punctuation
+' ' Text.Whitespace
+'SetAccess' Name.Builtin
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'protected' Keyword
+')' Punctuation
+'\n ' Text.Whitespace
+'% i too am a comment' Comment
+'\n ' Text.Whitespace
+'matrix' Name
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'0' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+']' Punctuation
+';' Punctuation
+'\n ' Text.Whitespace
+'string' Name
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+"'" Literal.String
+"i am a string'" Literal.String
+'\n ' Text.Whitespace
+'end' Keyword
+'\n ' Text.Whitespace
+'methods' Keyword
+'\n ' Text.Whitespace
+'% i am also a comment' Comment
+'\n ' Text.Whitespace
+'function' Keyword
+' ' Text.Whitespace
+'self' Text
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'Name' Name.Function
+'(' Punctuation
+')' Punctuation
+'\n ' Text.Whitespace
+'% i am a comment inside a constructor' Comment
+'\n ' Text.Whitespace
+'end' Keyword
+'\n ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_command_mode.txt b/tests/snippets/matlab/test_command_mode.txt
new file mode 100644
index 0000000..e9a8c11
--- /dev/null
+++ b/tests/snippets/matlab/test_command_mode.txt
@@ -0,0 +1,12 @@
+# MATLAB allows char function arguments to not be enclosed by parentheses
+# or contain quote characters, as long as they are space separated. Test
+# that one common such function is formatted appropriately.
+
+---input---
+help sin
+
+---tokens---
+'help' Name.Builtin
+' ' Text.Whitespace
+'sin' Name.Builtin
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_comment_after_continuation.txt b/tests/snippets/matlab/test_comment_after_continuation.txt
new file mode 100644
index 0000000..baf88e3
--- /dev/null
+++ b/tests/snippets/matlab/test_comment_after_continuation.txt
@@ -0,0 +1,25 @@
+# Test that text after the line continuation ellipses is marked as a comment.
+
+---input---
+set('T',300,... a comment
+'P',101325);
+
+---tokens---
+'set' Name.Builtin
+'(' Punctuation
+"'" Literal.String
+"T'" Literal.String
+',' Punctuation
+'300' Literal.Number.Integer
+',' Punctuation
+'...' Keyword
+' a comment' Comment
+'\n' Text.Whitespace
+
+"'" Literal.String
+"P'" Literal.String
+',' Punctuation
+'101325' Literal.Number.Integer
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_dot_operator.txt b/tests/snippets/matlab/test_dot_operator.txt
new file mode 100644
index 0000000..b858cd3
--- /dev/null
+++ b/tests/snippets/matlab/test_dot_operator.txt
@@ -0,0 +1,10 @@
+# 1./x is (1)(./)(x), not (1.)(/)(x)
+
+---input---
+1./x
+
+---tokens---
+'1' Literal.Number.Integer
+'./' Operator
+'x' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_keywords_ended_by_newline.txt b/tests/snippets/matlab/test_keywords_ended_by_newline.txt
new file mode 100644
index 0000000..59dca03
--- /dev/null
+++ b/tests/snippets/matlab/test_keywords_ended_by_newline.txt
@@ -0,0 +1,36 @@
+# Test that keywords on their own line are marked as keywords.
+
+---input---
+if x > 100
+ disp('x > 100')
+else
+ disp('x < 100')
+end
+
+---tokens---
+'if' Keyword
+' ' Text.Whitespace
+'x' Name
+' ' Text.Whitespace
+'>' Operator
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+'\n ' Text.Whitespace
+'disp' Name.Builtin
+'(' Punctuation
+"'" Literal.String
+"x > 100'" Literal.String
+')' Punctuation
+'\n' Text.Whitespace
+
+'else' Keyword
+'\n ' Text.Whitespace
+'disp' Name.Builtin
+'(' Punctuation
+"'" Literal.String
+"x < 100'" Literal.String
+')' Punctuation
+'\n' Text.Whitespace
+
+'end' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_line_continuation.txt b/tests/snippets/matlab/test_line_continuation.txt
new file mode 100644
index 0000000..1e47368
--- /dev/null
+++ b/tests/snippets/matlab/test_line_continuation.txt
@@ -0,0 +1,25 @@
+# Test that line continuation by ellipses does not produce generic
+# output on the second line.
+
+---input---
+set('T',300,...
+'P',101325);
+
+---tokens---
+'set' Name.Builtin
+'(' Punctuation
+"'" Literal.String
+"T'" Literal.String
+',' Punctuation
+'300' Literal.Number.Integer
+',' Punctuation
+'...' Keyword
+'\n' Text.Whitespace
+
+"'" Literal.String
+"P'" Literal.String
+',' Punctuation
+'101325' Literal.Number.Integer
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_multiple_spaces_variable_assignment.txt b/tests/snippets/matlab/test_multiple_spaces_variable_assignment.txt
new file mode 100644
index 0000000..ec5ac24
--- /dev/null
+++ b/tests/snippets/matlab/test_multiple_spaces_variable_assignment.txt
@@ -0,0 +1,13 @@
+# Test that multiple spaces with an equal sign doesn't get formatted to a string.
+
+---input---
+x = 100;
+
+---tokens---
+'x' Name
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_one_space_assignment.txt b/tests/snippets/matlab/test_one_space_assignment.txt
new file mode 100644
index 0000000..ceafb6e
--- /dev/null
+++ b/tests/snippets/matlab/test_one_space_assignment.txt
@@ -0,0 +1,13 @@
+# Test that one space before an equal sign is formatted correctly.
+
+---input---
+x = 100;
+
+---tokens---
+'x' Name
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_operator_multiple_space.txt b/tests/snippets/matlab/test_operator_multiple_space.txt
new file mode 100644
index 0000000..e13d3a3
--- /dev/null
+++ b/tests/snippets/matlab/test_operator_multiple_space.txt
@@ -0,0 +1,13 @@
+# Test that multiple spaces with an operator doesn't get formatted to a string.
+
+---input---
+x > 100;
+
+---tokens---
+'x' Name
+' ' Text.Whitespace
+'>' Operator
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlab/test_single_line.txt b/tests/snippets/matlab/test_single_line.txt
new file mode 100644
index 0000000..72a48f8
--- /dev/null
+++ b/tests/snippets/matlab/test_single_line.txt
@@ -0,0 +1,18 @@
+---input---
+set('T',300,'P',101325);
+
+---tokens---
+'set' Name.Builtin
+'(' Punctuation
+"'" Literal.String
+"T'" Literal.String
+',' Punctuation
+'300' Literal.Number.Integer
+',' Punctuation
+"'" Literal.String
+"P'" Literal.String
+',' Punctuation
+'101325' Literal.Number.Integer
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/matlabsession/test_wrong_continuation.txt b/tests/snippets/matlabsession/test_wrong_continuation.txt
new file mode 100644
index 0000000..6ea3e31
--- /dev/null
+++ b/tests/snippets/matlabsession/test_wrong_continuation.txt
@@ -0,0 +1,18 @@
+---input---
+>> foo()
+bar
+...
+baz
+
+---tokens---
+'>> ' Generic.Prompt
+'foo' Name
+'(' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'bar\n' Generic.Output
+
+'...\n' Generic.Output
+
+'baz\n' Generic.Output
diff --git a/tests/snippets/mcfunction/commenting.txt b/tests/snippets/mcfunction/commenting.txt
new file mode 100644
index 0000000..7a5a37d
--- /dev/null
+++ b/tests/snippets/mcfunction/commenting.txt
@@ -0,0 +1,173 @@
+---input---
+#> Get: #rx.playerdb:api/v2/get
+#>
+#> @input
+#> $in.uid rx.io
+#>
+#> @output
+#> rx.playerdb:io player
+#
+# Selects data inside the database and copies to rx.playerdb:io player
+# See #api/v2/select for more info..
+#
+#* Note: something, something, this is important..
+
+# Normal Comment
+
+# This **shouldn't** be a comment..
+scoreboard players operation @s obj = #fakeplayer obj
+
+#> single line block comment
+tellraw @a "This string # has # hashtags o_O"
+
+---tokens---
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'Get:' Literal.String.Doc
+' ' Text.Whitespace
+'#rx.playerdb:api/v2/get' Name.Function
+'\n' Text
+
+'#>' Comment.Multiline
+'\n' Text
+
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'@input' Name.Decorator
+'\n' Text
+
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'$in.uid' Name.Variable.Magic
+' ' Text.Whitespace
+'rx.io' Literal.String.Doc
+'\n' Text
+
+'#>' Comment.Multiline
+'\n' Text
+
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'@output' Name.Decorator
+'\n' Text
+
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'rx.playerdb:io' Name.Function
+' ' Text.Whitespace
+'player' Literal.String.Doc
+'\n' Text
+
+'#' Comment.Multiline
+'\n' Text
+
+'#' Comment.Multiline
+' ' Text.Whitespace
+'Selects' Comment.Multiline
+' ' Text.Whitespace
+'data' Comment.Multiline
+' ' Text.Whitespace
+'inside' Comment.Multiline
+' ' Text.Whitespace
+'the' Comment.Multiline
+' ' Text.Whitespace
+'database' Comment.Multiline
+' ' Text.Whitespace
+'and' Comment.Multiline
+' ' Text.Whitespace
+'copies' Comment.Multiline
+' ' Text.Whitespace
+'to' Comment.Multiline
+' ' Text.Whitespace
+'rx.playerdb:io' Name.Function
+' ' Text.Whitespace
+'player' Comment.Multiline
+'\n' Text
+
+'#' Comment.Multiline
+' ' Text.Whitespace
+'See' Comment.Multiline
+' ' Text.Whitespace
+'#api/v2/select' Name.Function
+' ' Text.Whitespace
+'for' Comment.Multiline
+' ' Text.Whitespace
+'more' Comment.Multiline
+' ' Text.Whitespace
+'info..' Comment.Multiline
+'\n' Text
+
+'#' Comment.Multiline
+' \n' Text.Whitespace
+
+'#*' Comment.Multiline
+' ' Text.Whitespace
+'Note:' Comment.Multiline
+' ' Text.Whitespace
+'something,' Comment.Multiline
+' ' Text.Whitespace
+'something,' Comment.Multiline
+' ' Text.Whitespace
+'this' Comment.Multiline
+' ' Text.Whitespace
+'is' Comment.Multiline
+' ' Text.Whitespace
+'important..' Comment.Multiline
+'\n' Text
+
+'\n#' Comment.Multiline
+' ' Text.Whitespace
+'Normal' Comment.Multiline
+' ' Text.Whitespace
+'Comment' Comment.Multiline
+'\n' Text
+
+'\n#' Comment.Multiline
+' ' Text.Whitespace
+'This' Comment.Multiline
+' ' Text.Whitespace
+"**shouldn't**" Comment.Multiline
+' ' Text.Whitespace
+'be' Comment.Multiline
+' ' Text.Whitespace
+'a' Comment.Multiline
+' ' Text.Whitespace
+'comment..' Comment.Multiline
+'\n' Text
+
+'scoreboard' Name.Builtin
+' ' Text.Whitespace
+'players' Keyword.Constant
+' ' Text.Whitespace
+'operation' Keyword.Constant
+' ' Text.Whitespace
+'@s' Name.Variable
+' ' Text.Whitespace
+'obj' Keyword.Constant
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'#fakeplayer' Name.Variable.Magic
+' ' Text.Whitespace
+'obj' Keyword.Constant
+'\n\n' Text.Whitespace
+
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'single' Literal.String.Doc
+' ' Text.Whitespace
+'line' Literal.String.Doc
+' ' Text.Whitespace
+'block' Literal.String.Doc
+' ' Text.Whitespace
+'comment' Literal.String.Doc
+'\n' Text
+
+'tellraw' Name.Builtin
+' ' Text.Whitespace
+'@a' Name.Variable
+' ' Text.Whitespace
+'"' Literal.String.Double
+'This string # has # hashtags o_O' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/coordinates.txt b/tests/snippets/mcfunction/coordinates.txt
new file mode 100644
index 0000000..d5398bc
--- /dev/null
+++ b/tests/snippets/mcfunction/coordinates.txt
@@ -0,0 +1,188 @@
+---input---
+# normal
+tp 1 2 3
+tp 100.5 80 -100.5
+
+# relative
+tp 10 ~ -10
+tp 10 ~10 -10
+tp 10 ~0.5 -10
+tp 10 ~.5 -10
+tp 10 ~-10 -10
+tp 10 ~-0.5 -10
+tp 10 ~-.5 -10
+
+# carrot
+tp 10 ^ -10
+tp 10 ^10 -10
+tp 10 ^0.5 -10
+tp 10 ^.5 -10
+tp 10 ^-10 -10
+tp 10 ^-0.5 -10
+tp 10 ^-.5 -10
+
+---tokens---
+'# normal' Comment.Single
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'1' Literal.Number.Float
+' ' Text.Whitespace
+'2' Literal.Number.Float
+' ' Text.Whitespace
+'3' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'100.5' Literal.Number.Float
+' ' Text.Whitespace
+'80' Literal.Number.Float
+' ' Text.Whitespace
+'-100.5' Literal.Number.Float
+'\n\n' Text.Whitespace
+
+'# relative' Comment.Single
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'0.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'-10' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'-0.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'~' Operator
+'-.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n\n' Text.Whitespace
+
+'# carrot' Comment.Single
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'0.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'-10' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'-0.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'tp' Name.Builtin
+' ' Text.Whitespace
+'10' Literal.Number.Float
+' ' Text.Whitespace
+'^' Operator
+'-.5' Literal.Number.Float
+' ' Text.Whitespace
+'-10' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/data.txt b/tests/snippets/mcfunction/data.txt
new file mode 100644
index 0000000..440d668
--- /dev/null
+++ b/tests/snippets/mcfunction/data.txt
@@ -0,0 +1,120 @@
+---input---
+data modify storage my:storage root set value {
+ key: "This NBT Compound is multiple lines",
+ Count: 10b,
+ tags: [
+ 0,
+ 1,
+ ],
+ UUID
+}
+
+tellraw @a {
+ "text": "how ever",
+ "color": "blue",
+ "extra": [
+ "this is json o_O"
+ ]
+}
+
+---tokens---
+'data' Name.Builtin
+' ' Text.Whitespace
+'modify' Keyword.Constant
+' ' Text.Whitespace
+'storage' Keyword.Constant
+' ' Text.Whitespace
+'my:storage' Name.Function
+' ' Text.Whitespace
+'root' Keyword.Constant
+' ' Text.Whitespace
+'set' Keyword.Constant
+' ' Text.Whitespace
+'value' Keyword.Constant
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'key' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'This NBT Compound is multiple lines' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'Count' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'10' Literal.Number.Integer
+'b' Name.Attribute
+',' Punctuation
+'\n ' Text.Whitespace
+'tags' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'\n ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+'\n ' Text.Whitespace
+'1' Literal.Number.Integer
+',' Punctuation
+'\n ' Text.Whitespace
+']' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'UUID' Name.Attribute
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n\n' Text.Whitespace
+
+'tellraw' Name.Builtin
+' ' Text.Whitespace
+'@a' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'"' Name.Attribute
+'text' Name.Attribute
+'"' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'how ever' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'"' Name.Attribute
+'color' Name.Attribute
+'"' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'blue' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'"' Name.Attribute
+'extra' Name.Attribute
+'"' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'\n ' Text.Whitespace
+'"' Name.Attribute
+'this' Name.Attribute
+' ' Text.Whitespace
+'is' Name.Attribute
+' ' Text.Whitespace
+'json' Name.Attribute
+' ' Text.Whitespace
+'o_' Name.Attribute
+'O' Name.Attribute
+'"' Name.Attribute
+'\n ' Text.Whitespace
+']' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/difficult_1.txt b/tests/snippets/mcfunction/difficult_1.txt
new file mode 100644
index 0000000..f9ad1bc
--- /dev/null
+++ b/tests/snippets/mcfunction/difficult_1.txt
@@ -0,0 +1,56 @@
+---input---
+# execute as @e[nbt={ Item: {id: "minecraft:diamond", Count: 64 } }] run
+# setblock ~ ~ ~ minecraft:dispenser[facing=up]{Items: [{id: "minecraft:diamond", Count: 1}]}
+# tellraw @a [{"text": "hello", "color": "blue"}, {"text": "world", "color": "blue"}]
+
+execute as @a[advancements={minecraft:story/form_obsidian={foo=true, bar=false},minecraft:story/follow_ender_eye={foo=false, bar=true}}] run
+
+---tokens---
+'# execute as @e[nbt={ Item: {id: "minecraft:diamond", Count: 64 } }] run' Comment.Single
+'\n' Text.Whitespace
+
+'# setblock ~ ~ ~ minecraft:dispenser[facing=up]{Items: [{id: "minecraft:diamond", Count: 1}]}' Comment.Single
+'\n' Text.Whitespace
+
+'# tellraw @a [{"text": "hello", "color": "blue"}, {"text": "world", "color": "blue"}]' Comment.Single
+'\n\n' Text.Whitespace
+
+'execute' Name.Builtin
+' ' Text.Whitespace
+'as' Keyword.Constant
+' ' Text.Whitespace
+'@a' Name.Variable
+'[' Punctuation
+'advancements' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'minecraft:story/form_obsidian' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'foo' Name.Attribute
+'=' Punctuation
+'true' Name.Tag
+',' Punctuation
+' ' Text.Whitespace
+'bar' Name.Attribute
+'=' Punctuation
+'false' Name.Tag
+'}' Punctuation
+',' Punctuation
+'minecraft:story/follow_ender_eye' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'foo' Name.Attribute
+'=' Punctuation
+'false' Name.Tag
+',' Punctuation
+' ' Text.Whitespace
+'bar' Name.Attribute
+'=' Punctuation
+'true' Name.Tag
+'}' Punctuation
+'}' Punctuation
+']' Punctuation
+' ' Text.Whitespace
+'run' Keyword.Constant
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/multiline.txt b/tests/snippets/mcfunction/multiline.txt
new file mode 100644
index 0000000..1587910
--- /dev/null
+++ b/tests/snippets/mcfunction/multiline.txt
@@ -0,0 +1,108 @@
+---input---
+execute
+ as @a # For each "player",
+ at @s # start at their feet.
+ anchored eyes # Looking through their eyes,
+ facing 0 0 0 # face perfectly at the target
+ anchored feet # (go back to the feet)
+ positioned ^ ^ ^1 # and move one block forward.
+ rotated as @s # Face the direction the player
+ # is actually facing,
+ positioned ^ ^ ^-1 # and move one block back.
+ if entity @s[distance=..0.6] # Check if we're close to the
+ # player's feet.
+ run
+ say "I'm facing the target!"
+
+---tokens---
+'execute' Name.Builtin
+'\n ' Text.Whitespace
+'as' Keyword.Constant
+' ' Text.Whitespace
+'@a' Name.Variable
+' ' Text.Whitespace
+'# For each "player",' Comment.Single
+'\n ' Text.Whitespace
+'at' Keyword.Constant
+' ' Text.Whitespace
+'@s' Name.Variable
+' ' Text.Whitespace
+'# start at their feet.' Comment.Single
+'\n ' Text.Whitespace
+'anchored' Keyword.Constant
+' ' Text.Whitespace
+'eyes' Keyword.Constant
+' ' Text.Whitespace
+'# Looking through their eyes,' Comment.Single
+'\n ' Text.Whitespace
+'facing' Keyword.Constant
+' ' Text.Whitespace
+'0' Literal.Number.Float
+' ' Text.Whitespace
+'0' Literal.Number.Float
+' ' Text.Whitespace
+'0' Literal.Number.Float
+' ' Text.Whitespace
+'# face perfectly at the target' Comment.Single
+'\n ' Text.Whitespace
+'anchored' Keyword.Constant
+' ' Text.Whitespace
+'feet' Keyword.Constant
+' ' Text.Whitespace
+'# (go back to the feet)' Comment.Single
+'\n ' Text.Whitespace
+'positioned' Keyword.Constant
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'^' Operator
+'1' Literal.Number.Float
+' ' Text.Whitespace
+'# and move one block forward.' Comment.Single
+'\n ' Text.Whitespace
+'rotated' Keyword.Constant
+' ' Text.Whitespace
+'as' Keyword.Constant
+' ' Text.Whitespace
+'@s' Name.Variable
+' ' Text.Whitespace
+'# Face the direction the player' Comment.Single
+'\n ' Text.Whitespace
+'# is actually facing,' Comment.Single
+'\n ' Text.Whitespace
+'positioned' Keyword.Constant
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'^' Operator
+'-1' Literal.Number.Float
+' ' Text.Whitespace
+'# and move one block back.' Comment.Single
+'\n ' Text.Whitespace
+'if' Keyword.Constant
+' ' Text.Whitespace
+'entity' Keyword.Constant
+' ' Text.Whitespace
+'@s' Name.Variable
+'[' Punctuation
+'distance' Name.Attribute
+'=' Punctuation
+'..' Literal
+'0.6' Literal.Number.Float
+']' Punctuation
+' ' Text.Whitespace
+"# Check if we're close to the" Comment.Single
+'\n ' Text.Whitespace
+"# player's feet." Comment.Single
+'\n ' Text.Whitespace
+'run' Keyword.Constant
+' \n say' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String.Double
+"I'm facing the target!" Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/selectors.txt b/tests/snippets/mcfunction/selectors.txt
new file mode 100644
index 0000000..ea69c78
--- /dev/null
+++ b/tests/snippets/mcfunction/selectors.txt
@@ -0,0 +1,73 @@
+---input---
+execute
+ as @a[
+ advancements={
+ minecraft:story/form_obsidian={
+ foo=true,
+ bar=false
+ },
+ minecraft:story/follow_ender_eye={
+ foo=false, bar=true
+ }
+ }
+ ] run
+ say this command is nuts
+
+---tokens---
+'execute' Name.Builtin
+'\n ' Text.Whitespace
+'as' Keyword.Constant
+' ' Text.Whitespace
+'@a' Name.Variable
+'[' Punctuation
+'\n ' Text.Whitespace
+'advancements' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'\n ' Text.Whitespace
+'minecraft:story/form_obsidian' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'\n ' Text.Whitespace
+'foo' Name.Attribute
+'=' Punctuation
+'true' Name.Tag
+',' Punctuation
+'\n ' Text.Whitespace
+'bar' Name.Attribute
+'=' Punctuation
+'false' Name.Tag
+'\n ' Text.Whitespace
+'}' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'minecraft:story/follow_ender_eye' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'\n ' Text.Whitespace
+'foo' Name.Attribute
+'=' Punctuation
+'false' Name.Tag
+',' Punctuation
+' ' Text.Whitespace
+'bar' Name.Attribute
+'=' Punctuation
+'true' Name.Tag
+'\n ' Text.Whitespace
+'}' Punctuation
+'\n ' Text.Whitespace
+'}' Punctuation
+'\n ' Text.Whitespace
+']' Punctuation
+' ' Text.Whitespace
+'run' Keyword.Constant
+'\n say' Name.Builtin
+' ' Text.Whitespace
+'this' Keyword.Constant
+' ' Text.Whitespace
+'command' Keyword.Constant
+' ' Text.Whitespace
+'is' Keyword.Constant
+' ' Text.Whitespace
+'nuts' Keyword.Constant
+'\n' Text.Whitespace
diff --git a/tests/snippets/mcfunction/simple.txt b/tests/snippets/mcfunction/simple.txt
new file mode 100644
index 0000000..664c91c
--- /dev/null
+++ b/tests/snippets/mcfunction/simple.txt
@@ -0,0 +1,92 @@
+---input---
+#> This command looks for a player with 10 hp and prints a message
+# @param - @s = player
+
+execute as @a[name="rx", nbt={Health: 10.0f}] run tellraw @a {"text": "this is my cool command"} # epic
+
+---tokens---
+'#>' Comment.Multiline
+' ' Text.Whitespace
+'This' Literal.String.Doc
+' ' Text.Whitespace
+'command' Literal.String.Doc
+' ' Text.Whitespace
+'looks' Literal.String.Doc
+' ' Text.Whitespace
+'for' Literal.String.Doc
+' ' Text.Whitespace
+'a' Literal.String.Doc
+' ' Text.Whitespace
+'player' Literal.String.Doc
+' ' Text.Whitespace
+'with' Literal.String.Doc
+' ' Text.Whitespace
+'10' Literal.String.Doc
+' ' Text.Whitespace
+'hp' Literal.String.Doc
+' ' Text.Whitespace
+'and' Literal.String.Doc
+' ' Text.Whitespace
+'prints' Literal.String.Doc
+' ' Text.Whitespace
+'a' Literal.String.Doc
+' ' Text.Whitespace
+'message' Literal.String.Doc
+'\n' Text
+
+'#' Comment.Multiline
+' ' Text.Whitespace
+'@param' Name.Decorator
+' ' Text.Whitespace
+'-' Comment.Multiline
+' ' Text.Whitespace
+'@s' Name.Decorator
+' ' Text.Whitespace
+'=' Comment.Multiline
+' ' Text.Whitespace
+'player' Comment.Multiline
+'\n' Text
+
+'\n' Text.Whitespace
+
+'execute' Name.Builtin
+' ' Text.Whitespace
+'as' Keyword.Constant
+' ' Text.Whitespace
+'@a' Name.Variable
+'[' Punctuation
+'name' Name.Attribute
+'=' Punctuation
+'"' Literal.String.Double
+'rx' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+' ' Text.Whitespace
+'nbt' Name.Attribute
+'=' Punctuation
+'{' Punctuation
+'Health' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'10.0f' Literal.Number.Float
+'}' Punctuation
+']' Punctuation
+' ' Text.Whitespace
+'run' Keyword.Constant
+' tellraw' Name.Builtin
+' ' Text.Whitespace
+'@a' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'"' Name.Attribute
+'text' Name.Attribute
+'"' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'this is my cool command' Literal.String.Double
+'"' Literal.String.Double
+'}' Punctuation
+' ' Text.Whitespace
+'# epic' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bold_fenced_by_asterisk.txt b/tests/snippets/md/test_bold_fenced_by_asterisk.txt
new file mode 100644
index 0000000..25c899d
--- /dev/null
+++ b/tests/snippets/md/test_bold_fenced_by_asterisk.txt
@@ -0,0 +1,15 @@
+---input---
+**bold**
+
+(**bold**)
+
+---tokens---
+'**bold**' Generic.Strong
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'(' Text
+'**bold**' Generic.Strong
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bold_fenced_by_underscore.txt b/tests/snippets/md/test_bold_fenced_by_underscore.txt
new file mode 100644
index 0000000..49c9110
--- /dev/null
+++ b/tests/snippets/md/test_bold_fenced_by_underscore.txt
@@ -0,0 +1,15 @@
+---input---
+__bold__
+
+(__bold__)
+
+---tokens---
+'__bold__' Generic.Strong
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'(' Text
+'__bold__' Generic.Strong
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bulleted_list_1.txt b/tests/snippets/md/test_bulleted_list_1.txt
new file mode 100644
index 0000000..8da212c
--- /dev/null
+++ b/tests/snippets/md/test_bulleted_list_1.txt
@@ -0,0 +1,14 @@
+---input---
+* foo
+* bar
+
+---tokens---
+'*' Keyword
+' ' Text.Whitespace
+'foo' Text
+'\n' Text.Whitespace
+
+'*' Keyword
+' ' Text.Whitespace
+'bar' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bulleted_list_2.txt b/tests/snippets/md/test_bulleted_list_2.txt
new file mode 100644
index 0000000..040b908
--- /dev/null
+++ b/tests/snippets/md/test_bulleted_list_2.txt
@@ -0,0 +1,14 @@
+---input---
+- foo
+- bar
+
+---tokens---
+'-' Keyword
+' ' Text.Whitespace
+'foo' Text
+'\n' Text.Whitespace
+
+'-' Keyword
+' ' Text.Whitespace
+'bar' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bulleted_list_3.txt b/tests/snippets/md/test_bulleted_list_3.txt
new file mode 100644
index 0000000..a59d5f7
--- /dev/null
+++ b/tests/snippets/md/test_bulleted_list_3.txt
@@ -0,0 +1,14 @@
+---input---
+* *foo*
+* bar
+
+---tokens---
+'*' Keyword
+' ' Text.Whitespace
+'*foo*' Generic.Emph
+'\n' Text.Whitespace
+
+'*' Keyword
+' ' Text.Whitespace
+'bar' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_bulleted_list_4.txt b/tests/snippets/md/test_bulleted_list_4.txt
new file mode 100644
index 0000000..111f9cf
--- /dev/null
+++ b/tests/snippets/md/test_bulleted_list_4.txt
@@ -0,0 +1,19 @@
+---input---
+```
+code
+```
+* *foo*
+* bar
+
+---tokens---
+'```\ncode\n```\n' Literal.String.Backtick
+
+'*' Keyword
+' ' Text.Whitespace
+'*foo*' Generic.Emph
+'\n' Text.Whitespace
+
+'*' Keyword
+' ' Text.Whitespace
+'bar' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_code_block_fenced_by_backticks.txt b/tests/snippets/md/test_code_block_fenced_by_backticks.txt
new file mode 100644
index 0000000..4f8fefa
--- /dev/null
+++ b/tests/snippets/md/test_code_block_fenced_by_backticks.txt
@@ -0,0 +1,15 @@
+---input---
+```
+code
+```
+
+```
+multi
+`line`
+code
+```
+
+---tokens---
+'```\ncode\n```\n' Literal.String.Backtick
+
+'\n```\nmulti\n`line`\ncode\n```\n' Literal.String.Backtick
diff --git a/tests/snippets/md/test_code_block_with_language.txt b/tests/snippets/md/test_code_block_with_language.txt
new file mode 100644
index 0000000..41f2563
--- /dev/null
+++ b/tests/snippets/md/test_code_block_with_language.txt
@@ -0,0 +1,16 @@
+---input---
+```python
+import this
+```
+
+---tokens---
+'```' Literal.String.Backtick
+'python' Literal.String.Backtick
+'\n' Text
+
+'import' Keyword.Namespace
+' ' Text
+'this' Name.Namespace
+'\n' Text.Whitespace
+
+'```\n' Literal.String.Backtick
diff --git a/tests/snippets/md/test_escape_italics.txt b/tests/snippets/md/test_escape_italics.txt
new file mode 100644
index 0000000..42e1cb4
--- /dev/null
+++ b/tests/snippets/md/test_escape_italics.txt
@@ -0,0 +1,23 @@
+---input---
+\*no italics\*
+
+\_ no italics \_
+
+---tokens---
+'\\*' Text
+'no' Text
+' ' Text
+'italics' Text
+'\\*' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'\\_' Text
+' ' Text
+'no' Text
+' ' Text
+'italics' Text
+' ' Text
+'\\_' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_inline_code.txt b/tests/snippets/md/test_inline_code.txt
new file mode 100644
index 0000000..c9a3964
--- /dev/null
+++ b/tests/snippets/md/test_inline_code.txt
@@ -0,0 +1,36 @@
+---input---
+code: `code`
+
+ `**code**`
+
+(`code`)
+
+code (`in brackets`)
+
+---tokens---
+'code:' Text
+' ' Text
+'`code`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text
+'`**code**`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'(' Text
+'`code`' Literal.String.Backtick
+')' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'code' Text
+' ' Text
+'(' Text
+'`in brackets`' Literal.String.Backtick
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_inline_code_after_block.txt b/tests/snippets/md/test_inline_code_after_block.txt
new file mode 100644
index 0000000..e1f704b
--- /dev/null
+++ b/tests/snippets/md/test_inline_code_after_block.txt
@@ -0,0 +1,19 @@
+---input---
+```
+code
+```
+* nocode
+* `code`
+
+---tokens---
+'```\ncode\n```\n' Literal.String.Backtick
+
+'*' Keyword
+' ' Text.Whitespace
+'nocode' Text
+'\n' Text.Whitespace
+
+'*' Keyword
+' ' Text.Whitespace
+'`code`' Literal.String.Backtick
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_inline_code_in_list.txt b/tests/snippets/md/test_inline_code_in_list.txt
new file mode 100644
index 0000000..9cd8aa0
--- /dev/null
+++ b/tests/snippets/md/test_inline_code_in_list.txt
@@ -0,0 +1,26 @@
+---input---
+* `code`
+
+- `code`
+
+1. `code`
+
+---tokens---
+'*' Keyword
+' ' Text.Whitespace
+'`code`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'-' Keyword
+' ' Text.Whitespace
+'`code`' Literal.String.Backtick
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'1.' Keyword
+' ' Text
+'`code`' Literal.String.Backtick
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_invalid_bold.txt b/tests/snippets/md/test_invalid_bold.txt
new file mode 100644
index 0000000..6a5db6a
--- /dev/null
+++ b/tests/snippets/md/test_invalid_bold.txt
@@ -0,0 +1,31 @@
+---input---
+**no bold__
+
+__no bold**
+
+*no bold*
+
+_no bold_
+
+---tokens---
+'**no' Text
+' ' Text
+'bold__' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'__no' Text
+' ' Text
+'bold**' Text
+'\n' Text.Whitespace
+
+'\n' Text
+
+'*no bold*' Generic.Emph
+'\n' Text.Whitespace
+
+'\n' Text
+
+'_no bold_' Generic.Emph
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_invalid_italics.txt b/tests/snippets/md/test_invalid_italics.txt
new file mode 100644
index 0000000..28acb5d
--- /dev/null
+++ b/tests/snippets/md/test_invalid_italics.txt
@@ -0,0 +1,31 @@
+---input---
+*no italics_
+
+_no italics*
+
+**no italics**
+
+__no italics__
+
+---tokens---
+'*no' Text
+' ' Text
+'italics_' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'_no' Text
+' ' Text
+'italics*' Text
+'\n' Text.Whitespace
+
+'\n' Text
+
+'**no italics**' Generic.Strong
+'\n' Text.Whitespace
+
+'\n' Text
+
+'__no italics__' Generic.Strong
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_italics_and_bold.txt b/tests/snippets/md/test_italics_and_bold.txt
new file mode 100644
index 0000000..bb2bf4f
--- /dev/null
+++ b/tests/snippets/md/test_italics_and_bold.txt
@@ -0,0 +1,21 @@
+---input---
+**bold** and *italics*
+
+*italics* and **bold**
+
+---tokens---
+'**bold**' Generic.Strong
+' ' Text
+'and' Text
+' ' Text
+'*italics*' Generic.Emph
+'\n' Text.Whitespace
+
+'\n' Text
+
+'*italics*' Generic.Emph
+' ' Text
+'and' Text
+' ' Text
+'**bold**' Generic.Strong
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_italics_fenced_by_asterisk.txt b/tests/snippets/md/test_italics_fenced_by_asterisk.txt
new file mode 100644
index 0000000..cd8775e
--- /dev/null
+++ b/tests/snippets/md/test_italics_fenced_by_asterisk.txt
@@ -0,0 +1,15 @@
+---input---
+*italics*
+
+(*italics*)
+
+---tokens---
+'*italics*' Generic.Emph
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'(' Text
+'*italics*' Generic.Emph
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_italics_fenced_by_underscore.txt b/tests/snippets/md/test_italics_fenced_by_underscore.txt
new file mode 100644
index 0000000..5f57756
--- /dev/null
+++ b/tests/snippets/md/test_italics_fenced_by_underscore.txt
@@ -0,0 +1,15 @@
+---input---
+_italics_
+
+(_italics_)
+
+---tokens---
+'_italics_' Generic.Emph
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'(' Text
+'_italics_' Generic.Emph
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_italics_no_multiline.txt b/tests/snippets/md/test_italics_no_multiline.txt
new file mode 100644
index 0000000..2657ed1
--- /dev/null
+++ b/tests/snippets/md/test_italics_no_multiline.txt
@@ -0,0 +1,10 @@
+---input---
+*no
+italics*
+
+---tokens---
+'*no' Text
+'\n' Text.Whitespace
+
+'italics*' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_links.txt b/tests/snippets/md/test_links.txt
new file mode 100644
index 0000000..9fc46e8
--- /dev/null
+++ b/tests/snippets/md/test_links.txt
@@ -0,0 +1,23 @@
+---input---
+[text](link)
+
+![Image of foo](https://bar.baz)
+
+---tokens---
+'[' Text
+'text' Name.Tag
+']' Text
+'(' Text
+'link' Name.Attribute
+')' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'![' Text
+'Image of foo' Name.Tag
+']' Text
+'(' Text
+'https://bar.baz' Name.Attribute
+')' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_mentions.txt b/tests/snippets/md/test_mentions.txt
new file mode 100644
index 0000000..a2def5c
--- /dev/null
+++ b/tests/snippets/md/test_mentions.txt
@@ -0,0 +1,10 @@
+---input---
+note for @me:
+
+---tokens---
+'note' Text
+' ' Text
+'for' Text
+' ' Text
+'@me:' Name.Entity
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_numbered_list.txt b/tests/snippets/md/test_numbered_list.txt
new file mode 100644
index 0000000..1384d6b
--- /dev/null
+++ b/tests/snippets/md/test_numbered_list.txt
@@ -0,0 +1,14 @@
+---input---
+1. foo
+2. bar
+
+---tokens---
+'1.' Keyword
+' ' Text
+'foo' Text
+'\n' Text.Whitespace
+
+'2.' Keyword
+' ' Text
+'bar' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_quote.txt b/tests/snippets/md/test_quote.txt
new file mode 100644
index 0000000..cc64ec2
--- /dev/null
+++ b/tests/snippets/md/test_quote.txt
@@ -0,0 +1,10 @@
+---input---
+> a
+> quote
+
+---tokens---
+'> ' Keyword
+'a\n' Generic.Emph
+
+'> ' Keyword
+'quote\n' Generic.Emph
diff --git a/tests/snippets/md/test_reference_style_links.txt b/tests/snippets/md/test_reference_style_links.txt
new file mode 100644
index 0000000..813cef3
--- /dev/null
+++ b/tests/snippets/md/test_reference_style_links.txt
@@ -0,0 +1,18 @@
+---input---
+[an example][id]
+[id]: http://example.com
+
+---tokens---
+'[' Text
+'an example' Name.Tag
+']' Text
+'[' Text
+'id' Name.Label
+']' Text
+'\n' Text.Whitespace
+
+'[' Text
+'id' Name.Label
+']: ' Text
+'http://example.com' Name.Attribute
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_strikethrough.txt b/tests/snippets/md/test_strikethrough.txt
new file mode 100644
index 0000000..483c63a
--- /dev/null
+++ b/tests/snippets/md/test_strikethrough.txt
@@ -0,0 +1,9 @@
+---input---
+~~striked~~not striked
+
+---tokens---
+'~~striked~~' Generic.Deleted
+'not' Text
+' ' Text
+'striked' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_task_list.txt b/tests/snippets/md/test_task_list.txt
new file mode 100644
index 0000000..7a64506
--- /dev/null
+++ b/tests/snippets/md/test_task_list.txt
@@ -0,0 +1,34 @@
+---input---
+- [ ] sample task
+
+* [ ] sample task
+
+ * [ ] sample task
+
+---tokens---
+'- ' Keyword
+'[ ]' Keyword
+' ' Text
+'sample' Text
+' ' Text
+'task' Text
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'* ' Keyword
+'[ ]' Keyword
+' ' Text
+'sample' Text
+' ' Text
+'task' Text
+'\n' Text.Whitespace
+
+'\n ' Text.Whitespace
+'* ' Keyword
+'[ ]' Keyword
+' ' Text
+'sample' Text
+' ' Text
+'task' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/md/test_topics.txt b/tests/snippets/md/test_topics.txt
new file mode 100644
index 0000000..18029cc
--- /dev/null
+++ b/tests/snippets/md/test_topics.txt
@@ -0,0 +1,10 @@
+---input---
+message to #you:
+
+---tokens---
+'message' Text
+' ' Text
+'to' Text
+' ' Text
+'#you:' Name.Entity
+'\n' Text.Whitespace
diff --git a/tests/snippets/mips/deprecated_substrings.txt b/tests/snippets/mips/deprecated_substrings.txt
new file mode 100644
index 0000000..4e7aa7d
--- /dev/null
+++ b/tests/snippets/mips/deprecated_substrings.txt
@@ -0,0 +1,34 @@
+---input---
+beql
+bnel
+bgtzl
+bgezl
+bltzl
+blezl
+bltzall
+bgezall
+
+---tokens---
+'beql' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bnel' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bgtzl' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bgezl' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bltzl' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'blezl' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bltzall' Keyword.Pseudo
+'\n' Text.Whitespace
+
+'bgezall' Keyword.Pseudo
+'\n' Text.Whitespace
diff --git a/tests/snippets/mips/keyword_substrings.txt b/tests/snippets/mips/keyword_substrings.txt
new file mode 100644
index 0000000..70d68d5
--- /dev/null
+++ b/tests/snippets/mips/keyword_substrings.txt
@@ -0,0 +1,254 @@
+---input---
+subu
+subi
+sub
+addu
+addiu
+addi
+add
+multu
+mult
+mulu
+mul
+maddu
+madd
+msubu
+msub
+divu
+div
+nor
+xor
+andi
+and
+ori
+xori
+or
+sllv
+sll
+srlv
+srl
+srav
+sra
+sltiu
+sltu
+slti
+slt
+jalr
+jal
+jr
+j
+bgezal
+bgez
+bltzal
+bltz
+lbu
+lb
+lhu
+lh
+lwr
+lw
+swl
+swr
+sw
+teqi
+teq
+tneqi
+tne
+tgeiu
+tgeu
+tgei
+tge
+tltiu
+tltu
+tlti
+tlt
+
+---tokens---
+'subu' Keyword
+'\n' Text.Whitespace
+
+'subi' Keyword
+'\n' Text.Whitespace
+
+'sub' Keyword
+'\n' Text.Whitespace
+
+'addu' Keyword
+'\n' Text.Whitespace
+
+'addiu' Keyword
+'\n' Text.Whitespace
+
+'addi' Keyword
+'\n' Text.Whitespace
+
+'add' Keyword
+'\n' Text.Whitespace
+
+'multu' Keyword
+'\n' Text.Whitespace
+
+'mult' Keyword
+'\n' Text.Whitespace
+
+'mulu' Keyword
+'\n' Text.Whitespace
+
+'mul' Keyword
+'\n' Text.Whitespace
+
+'maddu' Keyword
+'\n' Text.Whitespace
+
+'madd' Keyword
+'\n' Text.Whitespace
+
+'msubu' Keyword
+'\n' Text.Whitespace
+
+'msub' Keyword
+'\n' Text.Whitespace
+
+'divu' Keyword
+'\n' Text.Whitespace
+
+'div' Keyword
+'\n' Text.Whitespace
+
+'nor' Keyword
+'\n' Text.Whitespace
+
+'xor' Keyword
+'\n' Text.Whitespace
+
+'andi' Keyword
+'\n' Text.Whitespace
+
+'and' Keyword
+'\n' Text.Whitespace
+
+'ori' Keyword
+'\n' Text.Whitespace
+
+'xori' Keyword
+'\n' Text.Whitespace
+
+'or' Keyword
+'\n' Text.Whitespace
+
+'sllv' Keyword
+'\n' Text.Whitespace
+
+'sll' Keyword
+'\n' Text.Whitespace
+
+'srlv' Keyword
+'\n' Text.Whitespace
+
+'srl' Keyword
+'\n' Text.Whitespace
+
+'srav' Keyword
+'\n' Text.Whitespace
+
+'sra' Keyword
+'\n' Text.Whitespace
+
+'sltiu' Keyword
+'\n' Text.Whitespace
+
+'sltu' Keyword
+'\n' Text.Whitespace
+
+'slti' Keyword
+'\n' Text.Whitespace
+
+'slt' Keyword
+'\n' Text.Whitespace
+
+'jalr' Keyword
+'\n' Text.Whitespace
+
+'jal' Keyword
+'\n' Text.Whitespace
+
+'jr' Keyword
+'\n' Text.Whitespace
+
+'j' Keyword
+'\n' Text.Whitespace
+
+'bgezal' Keyword
+'\n' Text.Whitespace
+
+'bgez' Keyword
+'\n' Text.Whitespace
+
+'bltzal' Keyword
+'\n' Text.Whitespace
+
+'bltz' Keyword
+'\n' Text.Whitespace
+
+'lbu' Keyword
+'\n' Text.Whitespace
+
+'lb' Keyword
+'\n' Text.Whitespace
+
+'lhu' Keyword
+'\n' Text.Whitespace
+
+'lh' Keyword
+'\n' Text.Whitespace
+
+'lwr' Keyword
+'\n' Text.Whitespace
+
+'lw' Keyword
+'\n' Text.Whitespace
+
+'swl' Keyword
+'\n' Text.Whitespace
+
+'swr' Keyword
+'\n' Text.Whitespace
+
+'sw' Keyword
+'\n' Text.Whitespace
+
+'teqi' Keyword
+'\n' Text.Whitespace
+
+'teq' Keyword
+'\n' Text.Whitespace
+
+'tneqi' Keyword
+'\n' Text.Whitespace
+
+'tne' Keyword
+'\n' Text.Whitespace
+
+'tgeiu' Keyword
+'\n' Text.Whitespace
+
+'tgeu' Keyword
+'\n' Text.Whitespace
+
+'tgei' Keyword
+'\n' Text.Whitespace
+
+'tge' Keyword
+'\n' Text.Whitespace
+
+'tltiu' Keyword
+'\n' Text.Whitespace
+
+'tltu' Keyword
+'\n' Text.Whitespace
+
+'tlti' Keyword
+'\n' Text.Whitespace
+
+'tlt' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/mips/variable_substrings.txt b/tests/snippets/mips/variable_substrings.txt
new file mode 100644
index 0000000..85e845d
--- /dev/null
+++ b/tests/snippets/mips/variable_substrings.txt
@@ -0,0 +1,102 @@
+---input---
+remu
+rem
+mulou
+mulo
+negu
+neg
+beqz
+bgeu
+bge
+bgtu
+bgt
+bleu
+ble
+bltu
+blt
+bnez
+b
+ulhu
+ulh
+sgtu
+sgt
+sgeu
+sge
+sleu
+sle
+
+---tokens---
+'remu' Name.Variable
+'\n' Text.Whitespace
+
+'rem' Name.Variable
+'\n' Text.Whitespace
+
+'mulou' Name.Variable
+'\n' Text.Whitespace
+
+'mulo' Name.Variable
+'\n' Text.Whitespace
+
+'negu' Name.Variable
+'\n' Text.Whitespace
+
+'neg' Name.Variable
+'\n' Text.Whitespace
+
+'beqz' Name.Variable
+'\n' Text.Whitespace
+
+'bgeu' Name.Variable
+'\n' Text.Whitespace
+
+'bge' Name.Variable
+'\n' Text.Whitespace
+
+'bgtu' Name.Variable
+'\n' Text.Whitespace
+
+'bgt' Name.Variable
+'\n' Text.Whitespace
+
+'bleu' Name.Variable
+'\n' Text.Whitespace
+
+'ble' Name.Variable
+'\n' Text.Whitespace
+
+'bltu' Name.Variable
+'\n' Text.Whitespace
+
+'blt' Name.Variable
+'\n' Text.Whitespace
+
+'bnez' Name.Variable
+'\n' Text.Whitespace
+
+'b' Name.Variable
+'\n' Text.Whitespace
+
+'ulhu' Name.Variable
+'\n' Text.Whitespace
+
+'ulh' Name.Variable
+'\n' Text.Whitespace
+
+'sgtu' Name.Variable
+'\n' Text.Whitespace
+
+'sgt' Name.Variable
+'\n' Text.Whitespace
+
+'sgeu' Name.Variable
+'\n' Text.Whitespace
+
+'sge' Name.Variable
+'\n' Text.Whitespace
+
+'sleu' Name.Variable
+'\n' Text.Whitespace
+
+'sle' Name.Variable
+'\n' Text.Whitespace
diff --git a/tests/snippets/nasm/checkid.txt b/tests/snippets/nasm/checkid.txt
new file mode 100644
index 0000000..72f25da
--- /dev/null
+++ b/tests/snippets/nasm/checkid.txt
@@ -0,0 +1,32 @@
+---input---
+print_brick_no_color:
+ inc bx
+ mov di, bx ; comment
+ jmp check_col
+
+---tokens---
+'print_brick_no_color:' Name.Label
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'inc' Name.Function
+' ' Text.Whitespace
+'bx' Name.Builtin
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'mov' Name.Function
+' ' Text.Whitespace
+'di' Name.Builtin
+',' Punctuation
+' ' Text.Whitespace
+'bx' Name.Builtin
+' ' Text.Whitespace
+'; comment' Comment.Single
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'jmp' Name.Function
+' ' Text.Whitespace
+'check_col' Name.Variable
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_literal_number_bool.txt b/tests/snippets/objectivec/test_literal_number_bool.txt
new file mode 100644
index 0000000..4d6f965
--- /dev/null
+++ b/tests/snippets/objectivec/test_literal_number_bool.txt
@@ -0,0 +1,7 @@
+---input---
+@NO;
+
+---tokens---
+'@NO' Literal.Number
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_literal_number_bool_expression.txt b/tests/snippets/objectivec/test_literal_number_bool_expression.txt
new file mode 100644
index 0000000..36eb736
--- /dev/null
+++ b/tests/snippets/objectivec/test_literal_number_bool_expression.txt
@@ -0,0 +1,9 @@
+---input---
+@(YES);
+
+---tokens---
+'@(' Literal
+'YES' Name.Builtin
+')' Literal
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_literal_number_expression.txt b/tests/snippets/objectivec/test_literal_number_expression.txt
new file mode 100644
index 0000000..4ba2348
--- /dev/null
+++ b/tests/snippets/objectivec/test_literal_number_expression.txt
@@ -0,0 +1,11 @@
+---input---
+@(1+2);
+
+---tokens---
+'@(' Literal
+'1' Literal.Number.Integer
+'+' Operator
+'2' Literal.Number.Integer
+')' Literal
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_literal_number_int.txt b/tests/snippets/objectivec/test_literal_number_int.txt
new file mode 100644
index 0000000..35d947e
--- /dev/null
+++ b/tests/snippets/objectivec/test_literal_number_int.txt
@@ -0,0 +1,9 @@
+---input---
+@(1);
+
+---tokens---
+'@(' Literal
+'1' Literal.Number.Integer
+')' Literal
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_literal_number_nested_expression.txt b/tests/snippets/objectivec/test_literal_number_nested_expression.txt
new file mode 100644
index 0000000..b58d37c
--- /dev/null
+++ b/tests/snippets/objectivec/test_literal_number_nested_expression.txt
@@ -0,0 +1,15 @@
+---input---
+@(1+(2+3));
+
+---tokens---
+'@(' Literal
+'1' Literal.Number.Integer
+'+' Operator
+'(' Punctuation
+'2' Literal.Number.Integer
+'+' Operator
+'3' Literal.Number.Integer
+')' Punctuation
+')' Literal
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/objectivec/test_module_import.txt b/tests/snippets/objectivec/test_module_import.txt
new file mode 100644
index 0000000..c66e1c8
--- /dev/null
+++ b/tests/snippets/objectivec/test_module_import.txt
@@ -0,0 +1,9 @@
+---input---
+@import ModuleA;
+
+---tokens---
+'@import' Keyword
+' ' Text.Whitespace
+'ModuleA' Name
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/octave/test_multilinecomment.txt b/tests/snippets/octave/test_multilinecomment.txt
new file mode 100644
index 0000000..0987f4e
--- /dev/null
+++ b/tests/snippets/octave/test_multilinecomment.txt
@@ -0,0 +1,27 @@
+---input---
+%{
+This is a long comment
+ %}
+ #{
+This too
+#}
+This isnt
+
+---tokens---
+'%{\n' Comment.Multiline
+
+'This is a long comment\n' Comment.Multiline
+
+' %}' Comment.Multiline
+'\n ' Text
+'#{\n' Comment.Multiline
+
+'This too\n' Comment.Multiline
+
+'#}' Comment.Multiline
+'\n' Text
+
+'This' Name
+' ' Text
+'isnt' Name
+'\n' Text
diff --git a/tests/snippets/omg-idl/annotation_named_params.txt b/tests/snippets/omg-idl/annotation_named_params.txt
new file mode 100644
index 0000000..2758429
--- /dev/null
+++ b/tests/snippets/omg-idl/annotation_named_params.txt
@@ -0,0 +1,27 @@
+Asserts that annotation named parameters use Name, which is different from the
+normal "scoped_name =" lexing, which uses Name.Constant.
+
+---input---
+@mod::anno(value = const_a) const short const_b = const_a;
+
+---tokens---
+'@mod::anno' Name.Decorator
+'(' Punctuation
+'value' Name
+' ' Text.Whitespace
+'=' Punctuation
+' ' Text.Whitespace
+'const_a' Name
+')' Punctuation
+' ' Text.Whitespace
+'const' Keyword.Declaration
+' ' Text.Whitespace
+'short' Keyword.Type
+' ' Text.Whitespace
+'const_b' Name.Constant
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'const_a' Name
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/omg-idl/enumerators.txt b/tests/snippets/omg-idl/enumerators.txt
new file mode 100644
index 0000000..e6ff1e3
--- /dev/null
+++ b/tests/snippets/omg-idl/enumerators.txt
@@ -0,0 +1,18 @@
+Asserts that enumerators use Name.Constant instead of just Name.
+
+---input---
+enum Enum_t {enum_a, enum_b};
+
+---tokens---
+'enum' Keyword
+' ' Text.Whitespace
+'Enum_t' Name.Class
+' ' Text.Whitespace
+'{' Punctuation
+'enum_a' Name.Constant
+',' Punctuation
+' ' Text.Whitespace
+'enum_b' Name.Constant
+'}' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/peg/test_basic.txt b/tests/snippets/peg/test_basic.txt
new file mode 100644
index 0000000..13a9bb4
--- /dev/null
+++ b/tests/snippets/peg/test_basic.txt
@@ -0,0 +1,17 @@
+---input---
+rule<-("terminal"/nonterminal/[cls])*
+
+---tokens---
+'rule' Name.Class
+'<-' Operator
+'(' Punctuation
+'"terminal"' Literal.String.Double
+'/' Operator
+'nonterminal' Name.Class
+'/' Operator
+'[' Punctuation
+'cls' Literal.String
+']' Punctuation
+')' Punctuation
+'*' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/peg/test_modified_strings.txt b/tests/snippets/peg/test_modified_strings.txt
new file mode 100644
index 0000000..f411226
--- /dev/null
+++ b/tests/snippets/peg/test_modified_strings.txt
@@ -0,0 +1,21 @@
+# see for example:
+# - http://textx.github.io/Arpeggio/
+# - https://nim-lang.org/docs/pegs.html
+# - https://github.com/erikrose/parsimonious
+# can't handle parsimonious-style regex while ~ is a cut operator
+
+---input---
+~"regex" i"insensitive" "multimod"ilx ("not modified")
+
+---tokens---
+'~' Operator
+'"regex"' Literal.String.Double
+' ' Text
+'i"insensitive"' Literal.String.Double
+' ' Text
+'"multimod"ilx' Literal.String.Double
+' ' Text
+'(' Punctuation
+'"not modified"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/peg/test_operators.txt b/tests/snippets/peg/test_operators.txt
new file mode 100644
index 0000000..35aee32
--- /dev/null
+++ b/tests/snippets/peg/test_operators.txt
@@ -0,0 +1,29 @@
+# see for example:
+# - https://github.com/gvanrossum/pegen
+# - https://nim-lang.org/docs/pegs.html
+
+---input---
+rule = 'a' | 'b'
+rule: 'a' ~ 'b'
+
+---tokens---
+'rule' Name.Class
+' ' Text
+'=' Operator
+' ' Text
+"'a'" Literal.String.Single
+' ' Text
+'|' Operator
+' ' Text
+"'b'" Literal.String.Single
+'\n' Text.Whitespace
+
+'rule' Name.Class
+':' Operator
+' ' Text
+"'a'" Literal.String.Single
+' ' Text
+'~' Operator
+' ' Text
+"'b'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/php/test_backslashes_in_strings.txt b/tests/snippets/php/test_backslashes_in_strings.txt
new file mode 100644
index 0000000..4bdbeb5
--- /dev/null
+++ b/tests/snippets/php/test_backslashes_in_strings.txt
@@ -0,0 +1,28 @@
+---input---
+<?php
+$pipe = popen( "flamegraph.pl --title=\"$title\" > /var/www/html/w/docs/flamegraph.svg", 'w' );
+
+---tokens---
+'<?php' Comment.Preproc
+'\n' Text
+
+'$pipe' Name.Variable
+' ' Text
+'=' Operator
+' ' Text
+'popen' Name.Builtin
+'(' Punctuation
+' ' Text
+'"' Literal.String.Double
+'flamegraph.pl --title=' Literal.String.Double
+'\\"' Literal.String.Escape
+'$title' Literal.String.Interpol
+'\\"' Literal.String.Escape
+' > /var/www/html/w/docs/flamegraph.svg' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+' ' Text
+"'w'" Literal.String.Single
+' ' Text
+');' Punctuation
+'\n' Text
diff --git a/tests/snippets/php/test_string_escaping_run.txt b/tests/snippets/php/test_string_escaping_run.txt
new file mode 100644
index 0000000..e31fbfc
--- /dev/null
+++ b/tests/snippets/php/test_string_escaping_run.txt
@@ -0,0 +1,16 @@
+---input---
+<?php $x="{\""; ?>
+
+---tokens---
+'<?php' Comment.Preproc
+' ' Text
+'$x' Name.Variable
+'=' Operator
+'"' Literal.String.Double
+'{' Literal.String.Double
+'\\"' Literal.String.Escape
+'"' Literal.String.Double
+';' Punctuation
+' ' Text
+'?>' Comment.Preproc
+'\n' Other
diff --git a/tests/snippets/powershell/test_colon_punctuation.txt b/tests/snippets/powershell/test_colon_punctuation.txt
new file mode 100644
index 0000000..72733c3
--- /dev/null
+++ b/tests/snippets/powershell/test_colon_punctuation.txt
@@ -0,0 +1,35 @@
+---input---
+$message = (Test-Path C:\) ? "Path exists" : "Path not found"
+[math]::pi
+
+---tokens---
+'$message' Name.Variable
+' ' Text
+'=' Punctuation
+' ' Text
+'(' Punctuation
+'Test-Path' Name.Builtin
+' ' Text
+'C' Name
+':' Punctuation
+'\\' Punctuation
+')' Punctuation
+' ' Text
+'?' Punctuation
+' ' Text
+'"' Literal.String.Double
+'Path exists' Literal.String.Double
+'"' Literal.String.Double
+' ' Text
+':' Punctuation
+' ' Text
+'"' Literal.String.Double
+'Path not found' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text
+
+'[math]' Name.Constant
+':' Punctuation
+':' Punctuation
+'pi' Name
+'\n' Text
diff --git a/tests/snippets/powershell/test_remoting_session.txt b/tests/snippets/powershell/test_remoting_session.txt
new file mode 100644
index 0000000..23fa9c2
--- /dev/null
+++ b/tests/snippets/powershell/test_remoting_session.txt
@@ -0,0 +1,19 @@
+---input---
+[Long-NetBIOS-Hostname]: PS C:\> Get-ChildItem
+
+---tokens---
+'[' Punctuation
+'Long' Name
+'-NetBIOS' Name
+'-Hostname' Name
+']' Punctuation
+':' Punctuation
+' ' Text
+'PS ' Name.Builtin
+'C' Name
+':' Punctuation
+'\\' Punctuation
+'>' Punctuation
+' ' Text
+'Get-ChildItem' Name.Builtin
+'\n' Text
diff --git a/tests/snippets/powershell/test_session.txt b/tests/snippets/powershell/test_session.txt
new file mode 100644
index 0000000..7fe8b42
--- /dev/null
+++ b/tests/snippets/powershell/test_session.txt
@@ -0,0 +1,28 @@
+---input---
+PS C:\> Get-ChildItem
+
+PS> Get-ChildItem
+
+PS > Get-ChildItem
+
+---tokens---
+'PS ' Name.Builtin
+'C' Name
+':' Punctuation
+'\\' Punctuation
+'>' Punctuation
+' ' Text
+'Get-ChildItem' Name.Builtin
+'\n\n' Text
+
+'PS' Name
+'>' Punctuation
+' ' Text
+'Get-ChildItem' Name.Builtin
+'\n\n' Text
+
+'PS ' Name.Builtin
+'>' Punctuation
+' ' Text
+'Get-ChildItem' Name.Builtin
+'\n' Text
diff --git a/tests/snippets/praat/test_broken_unquoted_string.txt b/tests/snippets/praat/test_broken_unquoted_string.txt
new file mode 100644
index 0000000..e108bb8
--- /dev/null
+++ b/tests/snippets/praat/test_broken_unquoted_string.txt
@@ -0,0 +1,16 @@
+---input---
+printline string
+... 'interpolated' string
+
+---tokens---
+'printline' Keyword
+' ' Text.Whitespace
+'string' Literal.String
+'\n' Text.Whitespace
+
+'...' Punctuation
+' ' Text.Whitespace
+"'interpolated'" Literal.String.Interpol
+' ' Text.Whitespace
+'string' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_function_call.txt b/tests/snippets/praat/test_function_call.txt
new file mode 100644
index 0000000..608ef16
--- /dev/null
+++ b/tests/snippets/praat/test_function_call.txt
@@ -0,0 +1,20 @@
+---input---
+selected("Sound", i+(a*b))
+
+---tokens---
+'selected' Name.Function
+'(' Punctuation
+'"' Literal.String
+'Sound' Literal.String
+'"' Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'i' Text
+'+' Operator
+'(' Text
+'a' Text
+'*' Operator
+'b' Text
+')' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_inline_if.txt b/tests/snippets/praat/test_inline_if.txt
new file mode 100644
index 0000000..e370263
--- /dev/null
+++ b/tests/snippets/praat/test_inline_if.txt
@@ -0,0 +1,27 @@
+---input---
+var = if true == 1 then -1 else 0 fi
+
+---tokens---
+'var' Text
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'if' Keyword
+' ' Text.Whitespace
+'true' Text
+' ' Text.Whitespace
+'==' Operator
+' ' Text.Whitespace
+'1' Literal.Number
+' ' Text.Whitespace
+'then' Keyword
+' ' Text.Whitespace
+'-' Operator
+'1' Literal.Number
+' ' Text.Whitespace
+'else' Keyword
+' ' Text.Whitespace
+'0' Literal.Number
+' ' Text.Whitespace
+'fi' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_indexed_numeric_with_precision.txt b/tests/snippets/praat/test_interpolated_indexed_numeric_with_precision.txt
new file mode 100644
index 0000000..0dc23e6
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_indexed_numeric_with_precision.txt
@@ -0,0 +1,6 @@
+---input---
+'a[3]:3'
+
+---tokens---
+"'a[3]:3'" Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_local_numeric_with_precision.txt b/tests/snippets/praat/test_interpolated_local_numeric_with_precision.txt
new file mode 100644
index 0000000..515fa9e
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_local_numeric_with_precision.txt
@@ -0,0 +1,6 @@
+---input---
+'a.a:3'
+
+---tokens---
+"'a.a:3'" Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_numeric_hash.txt b/tests/snippets/praat/test_interpolated_numeric_hash.txt
new file mode 100644
index 0000000..3d6d0f8
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_numeric_hash.txt
@@ -0,0 +1,6 @@
+---input---
+'a["b"]'
+
+---tokens---
+'\'a["b"]\'' Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_numeric_indexed.txt b/tests/snippets/praat/test_interpolated_numeric_indexed.txt
new file mode 100644
index 0000000..ea436f1
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_numeric_indexed.txt
@@ -0,0 +1,6 @@
+---input---
+'a[3]'
+
+---tokens---
+"'a[3]'" Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_numeric_with_precision.txt b/tests/snippets/praat/test_interpolated_numeric_with_precision.txt
new file mode 100644
index 0000000..8ab410d
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_numeric_with_precision.txt
@@ -0,0 +1,6 @@
+---input---
+'a:3'
+
+---tokens---
+"'a:3'" Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_string_hash.txt b/tests/snippets/praat/test_interpolated_string_hash.txt
new file mode 100644
index 0000000..49f5147
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_string_hash.txt
@@ -0,0 +1,6 @@
+---input---
+'a$["b"]'
+
+---tokens---
+'\'a$["b"]\'' Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolated_string_indexed.txt b/tests/snippets/praat/test_interpolated_string_indexed.txt
new file mode 100644
index 0000000..7a845d7
--- /dev/null
+++ b/tests/snippets/praat/test_interpolated_string_indexed.txt
@@ -0,0 +1,6 @@
+---input---
+'a$[3]'
+
+---tokens---
+"'a$[3]'" Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_interpolation_boundary.txt b/tests/snippets/praat/test_interpolation_boundary.txt
new file mode 100644
index 0000000..1f3b14b
--- /dev/null
+++ b/tests/snippets/praat/test_interpolation_boundary.txt
@@ -0,0 +1,14 @@
+---input---
+"'" + "'"
+
+---tokens---
+'"' Literal.String
+"'" Literal.String
+'"' Literal.String
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'"' Literal.String
+"'" Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_numeric_assignment.txt b/tests/snippets/praat/test_numeric_assignment.txt
new file mode 100644
index 0000000..55fc1e6
--- /dev/null
+++ b/tests/snippets/praat/test_numeric_assignment.txt
@@ -0,0 +1,11 @@
+---input---
+var = -15e4
+
+---tokens---
+'var' Text
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'-' Operator
+'15e4' Literal.Number
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_string_assignment.txt b/tests/snippets/praat/test_string_assignment.txt
new file mode 100644
index 0000000..baff23d
--- /dev/null
+++ b/tests/snippets/praat/test_string_assignment.txt
@@ -0,0 +1,12 @@
+---input---
+var$ = "foo"
+
+---tokens---
+'var$' Text
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"' Literal.String
+'foo' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/praat/test_string_escaped_quotes.txt b/tests/snippets/praat/test_string_escaped_quotes.txt
new file mode 100644
index 0000000..1cf6330
--- /dev/null
+++ b/tests/snippets/praat/test_string_escaped_quotes.txt
@@ -0,0 +1,13 @@
+---input---
+"it said ""foo"""
+
+---tokens---
+'"' Literal.String
+'it said ' Literal.String
+'"' Literal.String
+'"' Literal.String
+'foo' Literal.String
+'"' Literal.String
+'"' Literal.String
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_complex_exp_single_quotes.txt b/tests/snippets/promql/test_complex_exp_single_quotes.txt
new file mode 100644
index 0000000..cbbde3a
--- /dev/null
+++ b/tests/snippets/promql/test_complex_exp_single_quotes.txt
@@ -0,0 +1,35 @@
+---input---
+(sum(rate(metric_test_app{app='turtle',proc='web'}[2m])) by(node))
+
+---tokens---
+'(' Operator
+'sum' Keyword
+'(' Operator
+'rate' Keyword.Reserved
+'(' Operator
+'metric_test_app' Name.Variable
+'{' Punctuation
+'app' Name.Label
+'=' Operator
+"'" Punctuation
+'turtle' Literal.String
+"'" Punctuation
+',' Punctuation
+'proc' Name.Label
+'=' Operator
+"'" Punctuation
+'web' Literal.String
+"'" Punctuation
+'}' Punctuation
+'[' Punctuation
+'2m' Literal.String
+']' Punctuation
+')' Operator
+')' Operator
+' ' Text.Whitespace
+'by' Keyword
+'(' Operator
+'node' Name.Variable
+')' Operator
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_expression_and_comment.txt b/tests/snippets/promql/test_expression_and_comment.txt
new file mode 100644
index 0000000..3ee2bfa
--- /dev/null
+++ b/tests/snippets/promql/test_expression_and_comment.txt
@@ -0,0 +1,15 @@
+---input---
+go_gc_duration_seconds{instance="localhost:9090"} # single comment
+
+---tokens---
+'go_gc_duration_seconds' Name.Variable
+'{' Punctuation
+'instance' Name.Label
+'=' Operator
+'"' Punctuation
+'localhost:9090' Literal.String
+'"' Punctuation
+'}' Punctuation
+' ' Text.Whitespace
+'# single comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_function_delta.txt b/tests/snippets/promql/test_function_delta.txt
new file mode 100644
index 0000000..8b1f9b0
--- /dev/null
+++ b/tests/snippets/promql/test_function_delta.txt
@@ -0,0 +1,19 @@
+---input---
+delta(cpu_temp_celsius{host="zeus"}[2h])
+
+---tokens---
+'delta' Keyword.Reserved
+'(' Operator
+'cpu_temp_celsius' Name.Variable
+'{' Punctuation
+'host' Name.Label
+'=' Operator
+'"' Punctuation
+'zeus' Literal.String
+'"' Punctuation
+'}' Punctuation
+'[' Punctuation
+'2h' Literal.String
+']' Punctuation
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_function_multi_line.txt b/tests/snippets/promql/test_function_multi_line.txt
new file mode 100644
index 0000000..31664cc
--- /dev/null
+++ b/tests/snippets/promql/test_function_multi_line.txt
@@ -0,0 +1,80 @@
+---input---
+label_replace(
+ sum by (instance) (
+ irate(node_disk_read_bytes_total[2m])
+ ) / 1024 / 1024,
+ "device",
+ 'disk',
+ "instance",
+ ".*"
+)
+
+---tokens---
+'label_replace' Keyword.Reserved
+'(' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'sum' Keyword
+' ' Text.Whitespace
+'by' Keyword
+' ' Text.Whitespace
+'(' Operator
+'instance' Name.Variable
+')' Operator
+' ' Text.Whitespace
+'(' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'irate' Keyword.Reserved
+'(' Operator
+'node_disk_read_bytes_total' Name.Variable
+'[' Punctuation
+'2m' Literal.String
+']' Punctuation
+')' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+')' Operator
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'1024' Literal.Number.Integer
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'1024' Literal.Number.Integer
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'device' Literal.String
+'"' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+"'" Punctuation
+'disk' Literal.String
+"'" Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'instance' Literal.String
+'"' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'.*' Literal.String
+'"' Punctuation
+'\n' Text.Whitespace
+
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_function_multi_line_with_offset.txt b/tests/snippets/promql/test_function_multi_line_with_offset.txt
new file mode 100644
index 0000000..a7462fb
--- /dev/null
+++ b/tests/snippets/promql/test_function_multi_line_with_offset.txt
@@ -0,0 +1,87 @@
+---input---
+label_replace(
+ avg by(instance)
+ (irate(node_cpu_seconds_total{mode = "idle"}[5m] offset 3s)
+ ) * 100,
+ "device",
+ "cpu",
+ "instance",
+ ".*"
+)
+
+---tokens---
+'label_replace' Keyword.Reserved
+'(' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'avg' Keyword
+' ' Text.Whitespace
+'by' Keyword
+'(' Operator
+'instance' Name.Variable
+')' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'(' Operator
+'irate' Keyword.Reserved
+'(' Operator
+'node_cpu_seconds_total' Name.Variable
+'{' Punctuation
+'mode' Name.Label
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"' Punctuation
+'idle' Literal.String
+'"' Punctuation
+'}' Punctuation
+'[' Punctuation
+'5m' Literal.String
+']' Punctuation
+' ' Text.Whitespace
+'offset' Keyword
+' ' Text.Whitespace
+'3s' Literal.String
+')' Operator
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+')' Operator
+' ' Text.Whitespace
+'*' Operator
+' ' Text.Whitespace
+'100' Literal.Number.Integer
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'device' Literal.String
+'"' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'cpu' Literal.String
+'"' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'instance' Literal.String
+'"' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"' Punctuation
+'.*' Literal.String
+'"' Punctuation
+'\n' Text.Whitespace
+
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_function_sum_with_args.txt b/tests/snippets/promql/test_function_sum_with_args.txt
new file mode 100644
index 0000000..3d677e1
--- /dev/null
+++ b/tests/snippets/promql/test_function_sum_with_args.txt
@@ -0,0 +1,19 @@
+---input---
+sum by (app, proc) (instance_memory_usage_bytes)
+
+---tokens---
+'sum' Keyword
+' ' Text.Whitespace
+'by' Keyword
+' ' Text.Whitespace
+'(' Operator
+'app' Name.Variable
+',' Punctuation
+' ' Text.Whitespace
+'proc' Name.Variable
+')' Operator
+' ' Text.Whitespace
+'(' Operator
+'instance_memory_usage_bytes' Name.Variable
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_matching_operator_no_regex_match.txt b/tests/snippets/promql/test_matching_operator_no_regex_match.txt
new file mode 100644
index 0000000..9859739
--- /dev/null
+++ b/tests/snippets/promql/test_matching_operator_no_regex_match.txt
@@ -0,0 +1,16 @@
+---input---
+metric_test_app{status!~'(4|5)..'}[2m]
+
+---tokens---
+'metric_test_app' Name.Variable
+'{' Punctuation
+'status' Name.Label
+'!~' Operator
+"'" Punctuation
+'(4|5)..' Literal.String
+"'" Punctuation
+'}' Punctuation
+'[' Punctuation
+'2m' Literal.String
+']' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_metric.txt b/tests/snippets/promql/test_metric.txt
new file mode 100644
index 0000000..e4889cf
--- /dev/null
+++ b/tests/snippets/promql/test_metric.txt
@@ -0,0 +1,6 @@
+---input---
+go_gc_duration_seconds
+
+---tokens---
+'go_gc_duration_seconds' Name.Variable
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_metric_multiple_labels.txt b/tests/snippets/promql/test_metric_multiple_labels.txt
new file mode 100644
index 0000000..bcb0b64
--- /dev/null
+++ b/tests/snippets/promql/test_metric_multiple_labels.txt
@@ -0,0 +1,19 @@
+---input---
+go_gc_duration_seconds{instance="localhost:9090",job="alertmanager"}
+
+---tokens---
+'go_gc_duration_seconds' Name.Variable
+'{' Punctuation
+'instance' Name.Label
+'=' Operator
+'"' Punctuation
+'localhost:9090' Literal.String
+'"' Punctuation
+',' Punctuation
+'job' Name.Label
+'=' Operator
+'"' Punctuation
+'alertmanager' Literal.String
+'"' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_metric_multiple_labels_with_spaces.txt b/tests/snippets/promql/test_metric_multiple_labels_with_spaces.txt
new file mode 100644
index 0000000..3ca3d49
--- /dev/null
+++ b/tests/snippets/promql/test_metric_multiple_labels_with_spaces.txt
@@ -0,0 +1,22 @@
+---input---
+go_gc_duration_seconds{ instance="localhost:9090", job="alertmanager" }
+
+---tokens---
+'go_gc_duration_seconds' Name.Variable
+'{' Punctuation
+' ' Text.Whitespace
+'instance' Name.Label
+'=' Operator
+'"' Punctuation
+'localhost:9090' Literal.String
+'"' Punctuation
+',' Punctuation
+' ' Text.Whitespace
+'job' Name.Label
+'=' Operator
+'"' Punctuation
+'alertmanager' Literal.String
+'"' Punctuation
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/promql/test_metric_one_label.txt b/tests/snippets/promql/test_metric_one_label.txt
new file mode 100644
index 0000000..8baeafb
--- /dev/null
+++ b/tests/snippets/promql/test_metric_one_label.txt
@@ -0,0 +1,13 @@
+---input---
+go_gc_duration_seconds{instance="localhost:9090"}
+
+---tokens---
+'go_gc_duration_seconds' Name.Variable
+'{' Punctuation
+'instance' Name.Label
+'=' Operator
+'"' Punctuation
+'localhost:9090' Literal.String
+'"' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_comments.txt b/tests/snippets/properties/test_comments.txt
new file mode 100644
index 0000000..9bc6586
--- /dev/null
+++ b/tests/snippets/properties/test_comments.txt
@@ -0,0 +1,12 @@
+# Ensures lines led by either # or ! are recognized as comments
+
+---input---
+! a comment
+# also a comment
+
+---tokens---
+'! a comment' Comment.Single
+'\n' Text.Whitespace
+
+'# also a comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_escaped_space_in_key.txt b/tests/snippets/properties/test_escaped_space_in_key.txt
new file mode 100644
index 0000000..eef0292
--- /dev/null
+++ b/tests/snippets/properties/test_escaped_space_in_key.txt
@@ -0,0 +1,10 @@
+---input---
+key = value
+
+---tokens---
+'key' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'value' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_escaped_space_in_value.txt b/tests/snippets/properties/test_escaped_space_in_value.txt
new file mode 100644
index 0000000..f76507f
--- /dev/null
+++ b/tests/snippets/properties/test_escaped_space_in_value.txt
@@ -0,0 +1,10 @@
+---input---
+key = doubleword\ value
+
+---tokens---
+'key' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'doubleword\\ value' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_just_key.txt b/tests/snippets/properties/test_just_key.txt
new file mode 100644
index 0000000..2665239
--- /dev/null
+++ b/tests/snippets/properties/test_just_key.txt
@@ -0,0 +1,6 @@
+---input---
+justkey
+
+---tokens---
+'justkey' Name.Attribute
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_just_key_with_space.txt b/tests/snippets/properties/test_just_key_with_space.txt
new file mode 100644
index 0000000..660c37c
--- /dev/null
+++ b/tests/snippets/properties/test_just_key_with_space.txt
@@ -0,0 +1,6 @@
+---input---
+just\ key
+
+---tokens---
+'just\\ key' Name.Attribute
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_leading_whitespace_comments.txt b/tests/snippets/properties/test_leading_whitespace_comments.txt
new file mode 100644
index 0000000..3a36afc
--- /dev/null
+++ b/tests/snippets/properties/test_leading_whitespace_comments.txt
@@ -0,0 +1,6 @@
+---input---
+# comment
+
+---tokens---
+'# comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/properties/test_space_delimited_kv_pair.txt b/tests/snippets/properties/test_space_delimited_kv_pair.txt
new file mode 100644
index 0000000..98961e4
--- /dev/null
+++ b/tests/snippets/properties/test_space_delimited_kv_pair.txt
@@ -0,0 +1,8 @@
+---input---
+key value
+
+---tokens---
+'key' Name.Attribute
+' ' Text.Whitespace
+'value' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/pwsh-session/test_continuation.txt b/tests/snippets/pwsh-session/test_continuation.txt
new file mode 100644
index 0000000..4735d36
--- /dev/null
+++ b/tests/snippets/pwsh-session/test_continuation.txt
@@ -0,0 +1,124 @@
+---input---
+PS> python -m doctest `
+> -o DONT_ACCEPT_TRUE_FOR_1 `
+> -o ELLIPSIS options.txt
+
+PS> $Params = @{
+> Height = 50
+> Width = 50
+> Depth = 50
+> Name = 'My Widget'
+> ID = '10dbe43f-0269-48b8-96cb-447a755add55'
+> }
+
+
+PS> ls |
+> grep "python"
+
+---tokens---
+'PS> ' Generic.Prompt
+'python' Name
+' ' Text
+'-m' Name
+' ' Text
+'doctest' Name
+' ' Text
+'`' Punctuation
+'\n' Text
+
+'> ' Generic.Prompt
+'-o' Name
+' ' Text
+'DONT_ACCEPT_TRUE_FOR_1' Name
+' ' Text
+'`' Punctuation
+'\n' Text
+
+'> ' Generic.Prompt
+'-o' Name
+' ' Text
+'ELLIPSIS' Name
+' ' Text
+'options' Name
+'.' Punctuation
+'txt' Name
+'\n' Text
+
+'\n' Generic.Output
+
+'PS> ' Generic.Prompt
+' ' Text
+'$Params' Name.Variable
+' ' Text
+'=' Punctuation
+' ' Text
+'@' Punctuation
+'{' Punctuation
+'\n' Text
+
+'> ' Generic.Prompt
+' ' Text
+'Height' Name
+' ' Text
+'=' Punctuation
+' ' Text
+'50' Name
+'\n' Text
+
+'> ' Generic.Prompt
+' ' Text
+'Width' Name
+' ' Text
+'=' Punctuation
+' ' Text
+'50' Name
+'\n' Text
+
+'> ' Generic.Prompt
+' ' Text
+'Depth' Name
+' ' Text
+'=' Punctuation
+' ' Text
+'50' Name
+'\n' Text
+
+'> ' Generic.Prompt
+' ' Text
+'Name' Name
+' ' Text
+'=' Punctuation
+' ' Text
+"'My Widget'" Literal.String.Single
+'\n' Text
+
+'> ' Generic.Prompt
+' ' Text
+'ID' Name
+' ' Text
+'=' Punctuation
+' ' Text
+"'10dbe43f-0269-48b8-96cb-447a755add55'" Literal.String.Single
+'\n' Text
+
+'> ' Generic.Prompt
+'}' Punctuation
+'\n' Text
+
+'\n' Generic.Output
+
+'\n' Generic.Output
+
+'PS> ' Generic.Prompt
+' ' Text
+'ls ' Name.Builtin
+'|' Punctuation
+'\n' Text
+
+'> ' Generic.Prompt
+'grep' Name
+' ' Text
+'"' Literal.String.Double
+'python' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text
diff --git a/tests/snippets/python/test_bytes_escape_codes.txt b/tests/snippets/python/test_bytes_escape_codes.txt
new file mode 100644
index 0000000..c1a3443
--- /dev/null
+++ b/tests/snippets/python/test_bytes_escape_codes.txt
@@ -0,0 +1,24 @@
+---input---
+b'\\ \n \x12 \777 \u1234 \U00010348 \N{Plus-Minus Sign}'
+
+---tokens---
+'b' Literal.String.Affix
+"'" Literal.String.Single
+'\\\\' Literal.String.Escape
+' ' Literal.String.Single
+'\\n' Literal.String.Escape
+' ' Literal.String.Single
+'\\x12' Literal.String.Escape
+' ' Literal.String.Single
+'\\777' Literal.String.Escape
+' ' Literal.String.Single
+'\\' Literal.String.Single
+'u1234 ' Literal.String.Single
+'\\' Literal.String.Single
+'U00010348 ' Literal.String.Single
+'\\' Literal.String.Single
+'N' Literal.String.Single
+'{' Literal.String.Single
+'Plus-Minus Sign}' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_floats.txt b/tests/snippets/python/test_floats.txt
new file mode 100644
index 0000000..3ae9dc9
--- /dev/null
+++ b/tests/snippets/python/test_floats.txt
@@ -0,0 +1,75 @@
+---input---
+123 -11 0 -0 0.5 .5 1. -0.5 +0.5 -.5 -1. 2e1 -2e1 2e -2e +2e e.3 -e.3 11.2e-3 -11.2e-3 5_6 5__6 _5 6_ 5.6_7 5.67_
+
+---tokens---
+'123' Literal.Number.Integer
+' ' Text
+'-' Operator
+'11' Literal.Number.Integer
+' ' Text
+'0' Literal.Number.Integer
+' ' Text
+'-' Operator
+'0' Literal.Number.Integer
+' ' Text
+'0.5' Literal.Number.Float
+' ' Text
+'.5' Literal.Number.Float
+' ' Text
+'1.' Literal.Number.Float
+' ' Text
+'-' Operator
+'0.5' Literal.Number.Float
+' ' Text
+'+' Operator
+'0.5' Literal.Number.Float
+' ' Text
+'-' Operator
+'.5' Literal.Number.Float
+' ' Text
+'-' Operator
+'1.' Literal.Number.Float
+' ' Text
+'2e1' Literal.Number.Float
+' ' Text
+'-' Operator
+'2e1' Literal.Number.Float
+' ' Text
+'2' Literal.Number.Integer
+'e' Name
+' ' Text
+'-' Operator
+'2' Literal.Number.Integer
+'e' Name
+' ' Text
+'+' Operator
+'2' Literal.Number.Integer
+'e' Name
+' ' Text
+'e' Name
+'.3' Literal.Number.Float
+' ' Text
+'-' Operator
+'e' Name
+'.3' Literal.Number.Float
+' ' Text
+'11.2e-3' Literal.Number.Float
+' ' Text
+'-' Operator
+'11.2e-3' Literal.Number.Float
+' ' Text
+'5_6' Literal.Number.Integer
+' ' Text
+'5' Literal.Number.Integer
+'__6' Name
+' ' Text
+'_5' Name
+' ' Text
+'6' Literal.Number.Integer
+'_' Name
+' ' Text
+'5.6_7' Literal.Number.Float
+' ' Text
+'5.67' Literal.Number.Float
+'_' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_01a.txt b/tests/snippets/python/test_fstring_01a.txt
new file mode 100644
index 0000000..a3b29a3
--- /dev/null
+++ b/tests/snippets/python/test_fstring_01a.txt
@@ -0,0 +1,25 @@
+---input---
+f'My name is {name}, my age next year is {age+1}, my anniversary is {anniversary:%A, %B %d, %Y}.'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'My name is ' Literal.String.Single
+'{' Literal.String.Interpol
+'name' Name
+'}' Literal.String.Interpol
+', my age next year is ' Literal.String.Single
+'{' Literal.String.Interpol
+'age' Name
+'+' Operator
+'1' Literal.Number.Integer
+'}' Literal.String.Interpol
+', my anniversary is ' Literal.String.Single
+'{' Literal.String.Interpol
+'anniversary' Name
+':' Literal.String.Interpol
+'%A, %B %d, %Y' Literal.String.Single
+'}' Literal.String.Interpol
+'.' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_01b.txt b/tests/snippets/python/test_fstring_01b.txt
new file mode 100644
index 0000000..0d2c343
--- /dev/null
+++ b/tests/snippets/python/test_fstring_01b.txt
@@ -0,0 +1,25 @@
+---input---
+f"My name is {name}, my age next year is {age+1}, my anniversary is {anniversary:%A, %B %d, %Y}."
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'My name is ' Literal.String.Double
+'{' Literal.String.Interpol
+'name' Name
+'}' Literal.String.Interpol
+', my age next year is ' Literal.String.Double
+'{' Literal.String.Interpol
+'age' Name
+'+' Operator
+'1' Literal.Number.Integer
+'}' Literal.String.Interpol
+', my anniversary is ' Literal.String.Double
+'{' Literal.String.Interpol
+'anniversary' Name
+':' Literal.String.Interpol
+'%A, %B %d, %Y' Literal.String.Double
+'}' Literal.String.Interpol
+'.' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_02a.txt b/tests/snippets/python/test_fstring_02a.txt
new file mode 100644
index 0000000..cfd41a9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_02a.txt
@@ -0,0 +1,13 @@
+---input---
+f'He said his name is {name!r}.'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'He said his name is ' Literal.String.Single
+'{' Literal.String.Interpol
+'name' Name
+'!r}' Literal.String.Interpol
+'.' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_02b.txt b/tests/snippets/python/test_fstring_02b.txt
new file mode 100644
index 0000000..c72a77b
--- /dev/null
+++ b/tests/snippets/python/test_fstring_02b.txt
@@ -0,0 +1,13 @@
+---input---
+f"He said his name is {name!r}."
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'He said his name is ' Literal.String.Double
+'{' Literal.String.Interpol
+'name' Name
+'!r}' Literal.String.Interpol
+'.' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_03a.txt b/tests/snippets/python/test_fstring_03a.txt
new file mode 100644
index 0000000..366e495
--- /dev/null
+++ b/tests/snippets/python/test_fstring_03a.txt
@@ -0,0 +1,14 @@
+---input---
+f'input={value:#06x}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'input=' Literal.String.Single
+'{' Literal.String.Interpol
+'value' Name
+':' Literal.String.Interpol
+'#06x' Literal.String.Single
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_03b.txt b/tests/snippets/python/test_fstring_03b.txt
new file mode 100644
index 0000000..260e56d
--- /dev/null
+++ b/tests/snippets/python/test_fstring_03b.txt
@@ -0,0 +1,14 @@
+---input---
+f"input={value:#06x}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'input=' Literal.String.Double
+'{' Literal.String.Interpol
+'value' Name
+':' Literal.String.Interpol
+'#06x' Literal.String.Double
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_04a.txt b/tests/snippets/python/test_fstring_04a.txt
new file mode 100644
index 0000000..58516c9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_04a.txt
@@ -0,0 +1,13 @@
+---input---
+f'{"quoted string"}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'"' Literal.String.Double
+'quoted string' Literal.String.Double
+'"' Literal.String.Double
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_04b.txt b/tests/snippets/python/test_fstring_04b.txt
new file mode 100644
index 0000000..40e1ea0
--- /dev/null
+++ b/tests/snippets/python/test_fstring_04b.txt
@@ -0,0 +1,13 @@
+---input---
+f"{'quoted string'}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+"'" Literal.String.Single
+'quoted string' Literal.String.Single
+"'" Literal.String.Single
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_05a.txt b/tests/snippets/python/test_fstring_05a.txt
new file mode 100644
index 0000000..77a5c10
--- /dev/null
+++ b/tests/snippets/python/test_fstring_05a.txt
@@ -0,0 +1,16 @@
+---input---
+f'{f"{inner}"}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'inner' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_05b.txt b/tests/snippets/python/test_fstring_05b.txt
new file mode 100644
index 0000000..9a0dc5a
--- /dev/null
+++ b/tests/snippets/python/test_fstring_05b.txt
@@ -0,0 +1,16 @@
+---input---
+f"{f'{inner}'}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'inner' Name
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_06a.txt b/tests/snippets/python/test_fstring_06a.txt
new file mode 100644
index 0000000..b8b33c0
--- /dev/null
+++ b/tests/snippets/python/test_fstring_06a.txt
@@ -0,0 +1,16 @@
+# SyntaxError: f-string expression part cannot include a backslash
+
+---input---
+f'{\'quoted string\'}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'\\' Error
+"'" Literal.String.Single
+'quoted string' Literal.String.Single
+"\\'" Literal.String.Escape
+'}' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_06b.txt b/tests/snippets/python/test_fstring_06b.txt
new file mode 100644
index 0000000..0712321
--- /dev/null
+++ b/tests/snippets/python/test_fstring_06b.txt
@@ -0,0 +1,16 @@
+# SyntaxError: f-string expression part cannot include a backslash
+
+---input---
+f"{\"quoted string\"}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'\\' Error
+'"' Literal.String.Double
+'quoted string' Literal.String.Double
+'\\"' Literal.String.Escape
+'}' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_07a.txt b/tests/snippets/python/test_fstring_07a.txt
new file mode 100644
index 0000000..c597ea9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_07a.txt
@@ -0,0 +1,17 @@
+---input---
+f'{{ {4*10} }}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{{' Literal.String.Escape
+' ' Literal.String.Single
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+' ' Literal.String.Single
+'}}' Literal.String.Escape
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_07b.txt b/tests/snippets/python/test_fstring_07b.txt
new file mode 100644
index 0000000..413c158
--- /dev/null
+++ b/tests/snippets/python/test_fstring_07b.txt
@@ -0,0 +1,17 @@
+---input---
+f"{{ {4*10} }}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{{' Literal.String.Escape
+' ' Literal.String.Double
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+' ' Literal.String.Double
+'}}' Literal.String.Escape
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_08a.txt b/tests/snippets/python/test_fstring_08a.txt
new file mode 100644
index 0000000..39dd887
--- /dev/null
+++ b/tests/snippets/python/test_fstring_08a.txt
@@ -0,0 +1,15 @@
+---input---
+f'{{{4*10}}}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{{' Literal.String.Escape
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+'}}' Literal.String.Escape
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_08b.txt b/tests/snippets/python/test_fstring_08b.txt
new file mode 100644
index 0000000..458c697
--- /dev/null
+++ b/tests/snippets/python/test_fstring_08b.txt
@@ -0,0 +1,15 @@
+---input---
+f"{{{4*10}}}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{{' Literal.String.Escape
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+'}}' Literal.String.Escape
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_09a.txt b/tests/snippets/python/test_fstring_09a.txt
new file mode 100644
index 0000000..00f3e7f
--- /dev/null
+++ b/tests/snippets/python/test_fstring_09a.txt
@@ -0,0 +1,14 @@
+---input---
+fr'x={4*10}'
+
+---tokens---
+'fr' Literal.String.Affix
+"'" Literal.String.Single
+'x=' Literal.String.Single
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_09b.txt b/tests/snippets/python/test_fstring_09b.txt
new file mode 100644
index 0000000..01d74e9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_09b.txt
@@ -0,0 +1,14 @@
+---input---
+fr"x={4*10}"
+
+---tokens---
+'fr' Literal.String.Affix
+'"' Literal.String.Double
+'x=' Literal.String.Double
+'{' Literal.String.Interpol
+'4' Literal.Number.Integer
+'*' Operator
+'10' Literal.Number.Integer
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_10a.txt b/tests/snippets/python/test_fstring_10a.txt
new file mode 100644
index 0000000..a2e11ba
--- /dev/null
+++ b/tests/snippets/python/test_fstring_10a.txt
@@ -0,0 +1,18 @@
+---input---
+f'abc {a["x"]} def'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'abc ' Literal.String.Single
+'{' Literal.String.Interpol
+'a' Name
+'[' Punctuation
+'"' Literal.String.Double
+'x' Literal.String.Double
+'"' Literal.String.Double
+']' Punctuation
+'}' Literal.String.Interpol
+' def' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_10b.txt b/tests/snippets/python/test_fstring_10b.txt
new file mode 100644
index 0000000..21d116c
--- /dev/null
+++ b/tests/snippets/python/test_fstring_10b.txt
@@ -0,0 +1,18 @@
+---input---
+f"abc {a['x']} def"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'abc ' Literal.String.Double
+'{' Literal.String.Interpol
+'a' Name
+'[' Punctuation
+"'" Literal.String.Single
+'x' Literal.String.Single
+"'" Literal.String.Single
+']' Punctuation
+'}' Literal.String.Interpol
+' def' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_11a.txt b/tests/snippets/python/test_fstring_11a.txt
new file mode 100644
index 0000000..ce20e7b
--- /dev/null
+++ b/tests/snippets/python/test_fstring_11a.txt
@@ -0,0 +1,18 @@
+---input---
+f'''abc {a['x']} def'''
+
+---tokens---
+'f' Literal.String.Affix
+"'''" Literal.String.Single
+'abc ' Literal.String.Single
+'{' Literal.String.Interpol
+'a' Name
+'[' Punctuation
+"'" Literal.String.Single
+'x' Literal.String.Single
+"'" Literal.String.Single
+']' Punctuation
+'}' Literal.String.Interpol
+' def' Literal.String.Single
+"'''" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_11b.txt b/tests/snippets/python/test_fstring_11b.txt
new file mode 100644
index 0000000..baf1370
--- /dev/null
+++ b/tests/snippets/python/test_fstring_11b.txt
@@ -0,0 +1,18 @@
+---input---
+f"""abc {a["x"]} def"""
+
+---tokens---
+'f' Literal.String.Affix
+'"""' Literal.String.Double
+'abc ' Literal.String.Double
+'{' Literal.String.Interpol
+'a' Name
+'[' Punctuation
+'"' Literal.String.Double
+'x' Literal.String.Double
+'"' Literal.String.Double
+']' Punctuation
+'}' Literal.String.Interpol
+' def' Literal.String.Double
+'"""' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_12a.txt b/tests/snippets/python/test_fstring_12a.txt
new file mode 100644
index 0000000..9988804
--- /dev/null
+++ b/tests/snippets/python/test_fstring_12a.txt
@@ -0,0 +1,16 @@
+---input---
+f'''{x
++1}'''
+
+---tokens---
+'f' Literal.String.Affix
+"'''" Literal.String.Single
+'{' Literal.String.Interpol
+'x' Name
+'\n' Text.Whitespace
+
+'+' Operator
+'1' Literal.Number.Integer
+'}' Literal.String.Interpol
+"'''" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_12b.txt b/tests/snippets/python/test_fstring_12b.txt
new file mode 100644
index 0000000..878b7a0
--- /dev/null
+++ b/tests/snippets/python/test_fstring_12b.txt
@@ -0,0 +1,16 @@
+---input---
+f"""{x
++1}"""
+
+---tokens---
+'f' Literal.String.Affix
+'"""' Literal.String.Double
+'{' Literal.String.Interpol
+'x' Name
+'\n' Text.Whitespace
+
+'+' Operator
+'1' Literal.Number.Integer
+'}' Literal.String.Interpol
+'"""' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_13a.txt b/tests/snippets/python/test_fstring_13a.txt
new file mode 100644
index 0000000..e6e97e8
--- /dev/null
+++ b/tests/snippets/python/test_fstring_13a.txt
@@ -0,0 +1,17 @@
+---input---
+f'''{d[0
+]}'''
+
+---tokens---
+'f' Literal.String.Affix
+"'''" Literal.String.Single
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
+
+']' Punctuation
+'}' Literal.String.Interpol
+"'''" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_13b.txt b/tests/snippets/python/test_fstring_13b.txt
new file mode 100644
index 0000000..0a3aa56
--- /dev/null
+++ b/tests/snippets/python/test_fstring_13b.txt
@@ -0,0 +1,17 @@
+---input---
+f"""{d[0
+]}"""
+
+---tokens---
+'f' Literal.String.Affix
+'"""' Literal.String.Double
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
+
+']' Punctuation
+'}' Literal.String.Interpol
+'"""' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_14a.txt b/tests/snippets/python/test_fstring_14a.txt
new file mode 100644
index 0000000..a826835
--- /dev/null
+++ b/tests/snippets/python/test_fstring_14a.txt
@@ -0,0 +1,20 @@
+---input---
+f'result: {value:{width}.{precision}}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'result: ' Literal.String.Single
+'{' Literal.String.Interpol
+'value' Name
+':' Literal.String.Interpol
+'{' Literal.String.Interpol
+'width' Name
+'}' Literal.String.Interpol
+'.' Literal.String.Single
+'{' Literal.String.Interpol
+'precision' Name
+'}' Literal.String.Interpol
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_14b.txt b/tests/snippets/python/test_fstring_14b.txt
new file mode 100644
index 0000000..3100883
--- /dev/null
+++ b/tests/snippets/python/test_fstring_14b.txt
@@ -0,0 +1,20 @@
+---input---
+f"result: {value:{width}.{precision}}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'result: ' Literal.String.Double
+'{' Literal.String.Interpol
+'value' Name
+':' Literal.String.Interpol
+'{' Literal.String.Interpol
+'width' Name
+'}' Literal.String.Interpol
+'.' Literal.String.Double
+'{' Literal.String.Interpol
+'precision' Name
+'}' Literal.String.Interpol
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_15a.txt b/tests/snippets/python/test_fstring_15a.txt
new file mode 100644
index 0000000..f8f62b8
--- /dev/null
+++ b/tests/snippets/python/test_fstring_15a.txt
@@ -0,0 +1,42 @@
+---input---
+'a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e'
+
+---tokens---
+"'" Literal.String.Single
+'a' Literal.String.Single
+"'" Literal.String.Single
+' ' Text
+"'" Literal.String.Single
+'b' Literal.String.Single
+"'" Literal.String.Single
+' ' Text
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'x' Name
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+' ' Text
+"'" Literal.String.Single
+'{c}' Literal.String.Interpol
+"'" Literal.String.Single
+' ' Text
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'str<' Literal.String.Single
+'{' Literal.String.Interpol
+'y' Name
+':' Literal.String.Interpol
+'^4' Literal.String.Single
+'}' Literal.String.Interpol
+'>' Literal.String.Single
+"'" Literal.String.Single
+' ' Text
+"'" Literal.String.Single
+'d' Literal.String.Single
+"'" Literal.String.Single
+' ' Text
+"'" Literal.String.Single
+'e' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_15b.txt b/tests/snippets/python/test_fstring_15b.txt
new file mode 100644
index 0000000..7b5307c
--- /dev/null
+++ b/tests/snippets/python/test_fstring_15b.txt
@@ -0,0 +1,42 @@
+---input---
+"a" "b" f"{x}" "{c}" f"str<{y:^4}>" "d" "e"
+
+---tokens---
+'"' Literal.String.Double
+'a' Literal.String.Double
+'"' Literal.String.Double
+' ' Text
+'"' Literal.String.Double
+'b' Literal.String.Double
+'"' Literal.String.Double
+' ' Text
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'x' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text
+'"' Literal.String.Double
+'{c}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'str<' Literal.String.Double
+'{' Literal.String.Interpol
+'y' Name
+':' Literal.String.Interpol
+'^4' Literal.String.Double
+'}' Literal.String.Interpol
+'>' Literal.String.Double
+'"' Literal.String.Double
+' ' Text
+'"' Literal.String.Double
+'d' Literal.String.Double
+'"' Literal.String.Double
+' ' Text
+'"' Literal.String.Double
+'e' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_16a.txt b/tests/snippets/python/test_fstring_16a.txt
new file mode 100644
index 0000000..15b11e8
--- /dev/null
+++ b/tests/snippets/python/test_fstring_16a.txt
@@ -0,0 +1,18 @@
+---input---
+f'{i}:{d[i]}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'i' Name
+'}' Literal.String.Interpol
+':' Literal.String.Single
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'i' Name
+']' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_16b.txt b/tests/snippets/python/test_fstring_16b.txt
new file mode 100644
index 0000000..e917516
--- /dev/null
+++ b/tests/snippets/python/test_fstring_16b.txt
@@ -0,0 +1,18 @@
+---input---
+f"{i}:{d[i]}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'i' Name
+'}' Literal.String.Interpol
+':' Literal.String.Double
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'i' Name
+']' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_17a.txt b/tests/snippets/python/test_fstring_17a.txt
new file mode 100644
index 0000000..9eefd91
--- /dev/null
+++ b/tests/snippets/python/test_fstring_17a.txt
@@ -0,0 +1,14 @@
+---input---
+f'x = {x:+3}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'x = ' Literal.String.Single
+'{' Literal.String.Interpol
+'x' Name
+':' Literal.String.Interpol
+'+3' Literal.String.Single
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_17b.txt b/tests/snippets/python/test_fstring_17b.txt
new file mode 100644
index 0000000..2d26481
--- /dev/null
+++ b/tests/snippets/python/test_fstring_17b.txt
@@ -0,0 +1,14 @@
+---input---
+f"x = {x:+3}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'x = ' Literal.String.Double
+'{' Literal.String.Interpol
+'x' Name
+':' Literal.String.Interpol
+'+3' Literal.String.Double
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_18a.txt b/tests/snippets/python/test_fstring_18a.txt
new file mode 100644
index 0000000..5455db7
--- /dev/null
+++ b/tests/snippets/python/test_fstring_18a.txt
@@ -0,0 +1,25 @@
+---input---
+f'{fn(lst,2)} {fn(lst,3)}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'fn' Name
+'(' Punctuation
+'lst' Name
+',' Punctuation
+'2' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+' ' Literal.String.Single
+'{' Literal.String.Interpol
+'fn' Name
+'(' Punctuation
+'lst' Name
+',' Punctuation
+'3' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_18b.txt b/tests/snippets/python/test_fstring_18b.txt
new file mode 100644
index 0000000..356cc3c
--- /dev/null
+++ b/tests/snippets/python/test_fstring_18b.txt
@@ -0,0 +1,25 @@
+---input---
+f"{fn(lst,2)} {fn(lst,3)}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'fn' Name
+'(' Punctuation
+'lst' Name
+',' Punctuation
+'2' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+' ' Literal.String.Double
+'{' Literal.String.Interpol
+'fn' Name
+'(' Punctuation
+'lst' Name
+',' Punctuation
+'3' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_19a.txt b/tests/snippets/python/test_fstring_19a.txt
new file mode 100644
index 0000000..7e7cde0
--- /dev/null
+++ b/tests/snippets/python/test_fstring_19a.txt
@@ -0,0 +1,46 @@
+---input---
+f'mapping is { {a:b for (a, b) in ((1, 2), (3, 4))} }'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'mapping is ' Literal.String.Single
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'{' Punctuation
+'a' Name
+':' Punctuation
+'b' Name
+' ' Text.Whitespace
+'for' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'a' Name
+',' Punctuation
+' ' Text.Whitespace
+'b' Name
+')' Punctuation
+' ' Text.Whitespace
+'in' Operator.Word
+' ' Text.Whitespace
+'(' Punctuation
+'(' Punctuation
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+')' Punctuation
+',' Punctuation
+' ' Text.Whitespace
+'(' Punctuation
+'3' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'4' Literal.Number.Integer
+')' Punctuation
+')' Punctuation
+'}' Punctuation
+' ' Text.Whitespace
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_19b.txt b/tests/snippets/python/test_fstring_19b.txt
new file mode 100644
index 0000000..3ae438a
--- /dev/null
+++ b/tests/snippets/python/test_fstring_19b.txt
@@ -0,0 +1,46 @@
+---input---
+f"mapping is { {a:b for (a, b) in ((1, 2), (3, 4))} }"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'mapping is ' Literal.String.Double
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'{' Punctuation
+'a' Name
+':' Punctuation
+'b' Name
+' ' Text.Whitespace
+'for' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'a' Name
+',' Punctuation
+' ' Text.Whitespace
+'b' Name
+')' Punctuation
+' ' Text.Whitespace
+'in' Operator.Word
+' ' Text.Whitespace
+'(' Punctuation
+'(' Punctuation
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+')' Punctuation
+',' Punctuation
+' ' Text.Whitespace
+'(' Punctuation
+'3' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'4' Literal.Number.Integer
+')' Punctuation
+')' Punctuation
+'}' Punctuation
+' ' Text.Whitespace
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_20a.txt b/tests/snippets/python/test_fstring_20a.txt
new file mode 100644
index 0000000..206e436
--- /dev/null
+++ b/tests/snippets/python/test_fstring_20a.txt
@@ -0,0 +1,17 @@
+---input---
+f'a={d["a"]}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'a=' Literal.String.Single
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'"' Literal.String.Double
+'a' Literal.String.Double
+'"' Literal.String.Double
+']' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_20b.txt b/tests/snippets/python/test_fstring_20b.txt
new file mode 100644
index 0000000..fb3acc4
--- /dev/null
+++ b/tests/snippets/python/test_fstring_20b.txt
@@ -0,0 +1,17 @@
+---input---
+f"a={d['a']}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'a=' Literal.String.Double
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+"'" Literal.String.Single
+'a' Literal.String.Single
+"'" Literal.String.Single
+']' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_21a.txt b/tests/snippets/python/test_fstring_21a.txt
new file mode 100644
index 0000000..0107431
--- /dev/null
+++ b/tests/snippets/python/test_fstring_21a.txt
@@ -0,0 +1,15 @@
+---input---
+f'a={d[a]}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'a=' Literal.String.Single
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'a' Name
+']' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_21b.txt b/tests/snippets/python/test_fstring_21b.txt
new file mode 100644
index 0000000..f659d54
--- /dev/null
+++ b/tests/snippets/python/test_fstring_21b.txt
@@ -0,0 +1,15 @@
+---input---
+f"a={d[a]}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'a=' Literal.String.Double
+'{' Literal.String.Interpol
+'d' Name
+'[' Punctuation
+'a' Name
+']' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_22a.txt b/tests/snippets/python/test_fstring_22a.txt
new file mode 100644
index 0000000..a1066e9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_22a.txt
@@ -0,0 +1,14 @@
+---input---
+fr'{header}:\s+'
+
+---tokens---
+'fr' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'header' Name
+'}' Literal.String.Interpol
+':' Literal.String.Single
+'\\' Literal.String.Single
+'s+' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_22b.txt b/tests/snippets/python/test_fstring_22b.txt
new file mode 100644
index 0000000..79cb6b4
--- /dev/null
+++ b/tests/snippets/python/test_fstring_22b.txt
@@ -0,0 +1,14 @@
+---input---
+fr"{header}:\s+"
+
+---tokens---
+'fr' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'header' Name
+'}' Literal.String.Interpol
+':' Literal.String.Double
+'\\' Literal.String.Double
+'s+' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_23a.txt b/tests/snippets/python/test_fstring_23a.txt
new file mode 100644
index 0000000..ef5cbd4
--- /dev/null
+++ b/tests/snippets/python/test_fstring_23a.txt
@@ -0,0 +1,11 @@
+---input---
+f'{a!r}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'a' Name
+'!r}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_23b.txt b/tests/snippets/python/test_fstring_23b.txt
new file mode 100644
index 0000000..572fe04
--- /dev/null
+++ b/tests/snippets/python/test_fstring_23b.txt
@@ -0,0 +1,11 @@
+---input---
+f"{a!r}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'a' Name
+'!r}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_24a.txt b/tests/snippets/python/test_fstring_24a.txt
new file mode 100644
index 0000000..d45385b
--- /dev/null
+++ b/tests/snippets/python/test_fstring_24a.txt
@@ -0,0 +1,23 @@
+---input---
+f'{(lambda x: x*2)(3)}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'(' Punctuation
+'lambda' Keyword
+' ' Text.Whitespace
+'x' Name
+':' Punctuation
+' ' Text.Whitespace
+'x' Name
+'*' Operator
+'2' Literal.Number.Integer
+')' Punctuation
+'(' Punctuation
+'3' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_24b.txt b/tests/snippets/python/test_fstring_24b.txt
new file mode 100644
index 0000000..5e83b19
--- /dev/null
+++ b/tests/snippets/python/test_fstring_24b.txt
@@ -0,0 +1,23 @@
+---input---
+f"{(lambda x: x*2)(3)}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'(' Punctuation
+'lambda' Keyword
+' ' Text.Whitespace
+'x' Name
+':' Punctuation
+' ' Text.Whitespace
+'x' Name
+'*' Operator
+'2' Literal.Number.Integer
+')' Punctuation
+'(' Punctuation
+'3' Literal.Number.Integer
+')' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_25a.txt b/tests/snippets/python/test_fstring_25a.txt
new file mode 100644
index 0000000..470db36
--- /dev/null
+++ b/tests/snippets/python/test_fstring_25a.txt
@@ -0,0 +1,24 @@
+---input---
+extra = f'{extra},waiters:{len(self._waiters)}'
+
+---tokens---
+'extra' Name
+' ' Text
+'=' Operator
+' ' Text
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'extra' Name
+'}' Literal.String.Interpol
+',waiters:' Literal.String.Single
+'{' Literal.String.Interpol
+'len' Name.Builtin
+'(' Punctuation
+'self' Name.Builtin.Pseudo
+'.' Operator
+'_waiters' Name
+')' Punctuation
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_25b.txt b/tests/snippets/python/test_fstring_25b.txt
new file mode 100644
index 0000000..94893e5
--- /dev/null
+++ b/tests/snippets/python/test_fstring_25b.txt
@@ -0,0 +1,24 @@
+---input---
+extra = f"{extra},waiters:{len(self._waiters)}"
+
+---tokens---
+'extra' Name
+' ' Text
+'=' Operator
+' ' Text
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'extra' Name
+'}' Literal.String.Interpol
+',waiters:' Literal.String.Double
+'{' Literal.String.Interpol
+'len' Name.Builtin
+'(' Punctuation
+'self' Name.Builtin.Pseudo
+'.' Operator
+'_waiters' Name
+')' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_26a.txt b/tests/snippets/python/test_fstring_26a.txt
new file mode 100644
index 0000000..01a231d
--- /dev/null
+++ b/tests/snippets/python/test_fstring_26a.txt
@@ -0,0 +1,20 @@
+---input---
+message.append(f" [line {lineno:2d}]")
+
+---tokens---
+'message' Name
+'.' Operator
+'append' Name
+'(' Punctuation
+'f' Literal.String.Affix
+'"' Literal.String.Double
+' [line ' Literal.String.Double
+'{' Literal.String.Interpol
+'lineno' Name
+':' Literal.String.Interpol
+'2d' Literal.String.Double
+'}' Literal.String.Interpol
+']' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_26b.txt b/tests/snippets/python/test_fstring_26b.txt
new file mode 100644
index 0000000..a813149
--- /dev/null
+++ b/tests/snippets/python/test_fstring_26b.txt
@@ -0,0 +1,20 @@
+---input---
+message.append(f' [line {lineno:2d}]')
+
+---tokens---
+'message' Name
+'.' Operator
+'append' Name
+'(' Punctuation
+'f' Literal.String.Affix
+"'" Literal.String.Single
+' [line ' Literal.String.Single
+'{' Literal.String.Interpol
+'lineno' Name
+':' Literal.String.Interpol
+'2d' Literal.String.Single
+'}' Literal.String.Interpol
+']' Literal.String.Single
+"'" Literal.String.Single
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_27a.txt b/tests/snippets/python/test_fstring_27a.txt
new file mode 100644
index 0000000..75af0c1
--- /dev/null
+++ b/tests/snippets/python/test_fstring_27a.txt
@@ -0,0 +1,11 @@
+---input---
+f"{foo=}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'foo' Name
+'=}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_27b.txt b/tests/snippets/python/test_fstring_27b.txt
new file mode 100644
index 0000000..71e6e51
--- /dev/null
+++ b/tests/snippets/python/test_fstring_27b.txt
@@ -0,0 +1,11 @@
+---input---
+f'{foo=}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'foo' Name
+'=}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_28a.txt b/tests/snippets/python/test_fstring_28a.txt
new file mode 100644
index 0000000..cdc65db
--- /dev/null
+++ b/tests/snippets/python/test_fstring_28a.txt
@@ -0,0 +1,11 @@
+---input---
+f'{foo=!s}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'foo' Name
+'=!s}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_28b.txt b/tests/snippets/python/test_fstring_28b.txt
new file mode 100644
index 0000000..99cf4f3
--- /dev/null
+++ b/tests/snippets/python/test_fstring_28b.txt
@@ -0,0 +1,11 @@
+---input---
+f"{foo=!s}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'foo' Name
+'=!s}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_29a.txt b/tests/snippets/python/test_fstring_29a.txt
new file mode 100644
index 0000000..2100b3b
--- /dev/null
+++ b/tests/snippets/python/test_fstring_29a.txt
@@ -0,0 +1,15 @@
+---input---
+f"{math.pi=!f:.2f}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'math' Name
+'.' Operator
+'pi' Name
+'=!f:' Literal.String.Interpol
+'.2f' Literal.String.Double
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_29b.txt b/tests/snippets/python/test_fstring_29b.txt
new file mode 100644
index 0000000..4cc18dd
--- /dev/null
+++ b/tests/snippets/python/test_fstring_29b.txt
@@ -0,0 +1,15 @@
+---input---
+f'{math.pi=!f:.2f}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'math' Name
+'.' Operator
+'pi' Name
+'=!f:' Literal.String.Interpol
+'.2f' Literal.String.Single
+'}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_30a.txt b/tests/snippets/python/test_fstring_30a.txt
new file mode 100644
index 0000000..3db443e
--- /dev/null
+++ b/tests/snippets/python/test_fstring_30a.txt
@@ -0,0 +1,16 @@
+---input---
+f"{ chr(65) =}"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'chr' Name.Builtin
+'(' Punctuation
+'65' Literal.Number.Integer
+')' Punctuation
+' ' Text.Whitespace
+'=}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_30b.txt b/tests/snippets/python/test_fstring_30b.txt
new file mode 100644
index 0000000..5f082e5
--- /dev/null
+++ b/tests/snippets/python/test_fstring_30b.txt
@@ -0,0 +1,16 @@
+---input---
+f'{ chr(65) =}'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'chr' Name.Builtin
+'(' Punctuation
+'65' Literal.Number.Integer
+')' Punctuation
+' ' Text.Whitespace
+'=}' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_31a.txt b/tests/snippets/python/test_fstring_31a.txt
new file mode 100644
index 0000000..3e9c091
--- /dev/null
+++ b/tests/snippets/python/test_fstring_31a.txt
@@ -0,0 +1,15 @@
+---input---
+f"{chr(65) = }"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'chr' Name.Builtin
+'(' Punctuation
+'65' Literal.Number.Integer
+')' Punctuation
+' ' Text.Whitespace
+'= }' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_31b.txt b/tests/snippets/python/test_fstring_31b.txt
new file mode 100644
index 0000000..37adb3e
--- /dev/null
+++ b/tests/snippets/python/test_fstring_31b.txt
@@ -0,0 +1,15 @@
+---input---
+f'{chr(65) = }'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'chr' Name.Builtin
+'(' Punctuation
+'65' Literal.Number.Integer
+')' Punctuation
+' ' Text.Whitespace
+'= }' Literal.String.Interpol
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_32a.txt b/tests/snippets/python/test_fstring_32a.txt
new file mode 100644
index 0000000..dcfc39d
--- /dev/null
+++ b/tests/snippets/python/test_fstring_32a.txt
@@ -0,0 +1,15 @@
+---input---
+f'*{n=:30}*'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'*' Literal.String.Single
+'{' Literal.String.Interpol
+'n' Name
+'=:' Literal.String.Interpol
+'30' Literal.String.Single
+'}' Literal.String.Interpol
+'*' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_32b.txt b/tests/snippets/python/test_fstring_32b.txt
new file mode 100644
index 0000000..1f8a450
--- /dev/null
+++ b/tests/snippets/python/test_fstring_32b.txt
@@ -0,0 +1,15 @@
+---input---
+f"*{n=:30}*"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'*' Literal.String.Double
+'{' Literal.String.Interpol
+'n' Name
+'=:' Literal.String.Interpol
+'30' Literal.String.Double
+'}' Literal.String.Interpol
+'*' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_33a.txt b/tests/snippets/python/test_fstring_33a.txt
new file mode 100644
index 0000000..5318b55
--- /dev/null
+++ b/tests/snippets/python/test_fstring_33a.txt
@@ -0,0 +1,15 @@
+---input---
+f'*{n=!r:30}*'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'*' Literal.String.Single
+'{' Literal.String.Interpol
+'n' Name
+'=!r:' Literal.String.Interpol
+'30' Literal.String.Single
+'}' Literal.String.Interpol
+'*' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_33b.txt b/tests/snippets/python/test_fstring_33b.txt
new file mode 100644
index 0000000..a0211dd
--- /dev/null
+++ b/tests/snippets/python/test_fstring_33b.txt
@@ -0,0 +1,15 @@
+---input---
+f"*{n=!r:30}*"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'*' Literal.String.Double
+'{' Literal.String.Interpol
+'n' Name
+'=!r:' Literal.String.Interpol
+'30' Literal.String.Double
+'}' Literal.String.Interpol
+'*' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_34a.txt b/tests/snippets/python/test_fstring_34a.txt
new file mode 100644
index 0000000..9b80cc9
--- /dev/null
+++ b/tests/snippets/python/test_fstring_34a.txt
@@ -0,0 +1,20 @@
+---input---
+f"*{f'{n=}':30}*"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'*' Literal.String.Double
+'{' Literal.String.Interpol
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'{' Literal.String.Interpol
+'n' Name
+'=}' Literal.String.Interpol
+"'" Literal.String.Single
+':' Literal.String.Interpol
+'30' Literal.String.Double
+'}' Literal.String.Interpol
+'*' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_34b.txt b/tests/snippets/python/test_fstring_34b.txt
new file mode 100644
index 0000000..5abf37f
--- /dev/null
+++ b/tests/snippets/python/test_fstring_34b.txt
@@ -0,0 +1,20 @@
+---input---
+f'*{f"{n=}":30}*'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'*' Literal.String.Single
+'{' Literal.String.Interpol
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'{' Literal.String.Interpol
+'n' Name
+'=}' Literal.String.Interpol
+'"' Literal.String.Double
+':' Literal.String.Interpol
+'30' Literal.String.Single
+'}' Literal.String.Interpol
+'*' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_35a.txt b/tests/snippets/python/test_fstring_35a.txt
new file mode 100644
index 0000000..b4e1041
--- /dev/null
+++ b/tests/snippets/python/test_fstring_35a.txt
@@ -0,0 +1,15 @@
+---input---
+f'*{n=:+<30}*'
+
+---tokens---
+'f' Literal.String.Affix
+"'" Literal.String.Single
+'*' Literal.String.Single
+'{' Literal.String.Interpol
+'n' Name
+'=:' Literal.String.Interpol
+'+<30' Literal.String.Single
+'}' Literal.String.Interpol
+'*' Literal.String.Single
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_35b.txt b/tests/snippets/python/test_fstring_35b.txt
new file mode 100644
index 0000000..773e7a4
--- /dev/null
+++ b/tests/snippets/python/test_fstring_35b.txt
@@ -0,0 +1,15 @@
+---input---
+f"*{n=:+<30}*"
+
+---tokens---
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'*' Literal.String.Double
+'{' Literal.String.Interpol
+'n' Name
+'=:' Literal.String.Interpol
+'+<30' Literal.String.Double
+'}' Literal.String.Interpol
+'*' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_36a.txt b/tests/snippets/python/test_fstring_36a.txt
new file mode 100644
index 0000000..773e38e
--- /dev/null
+++ b/tests/snippets/python/test_fstring_36a.txt
@@ -0,0 +1,16 @@
+---input---
+
+f'''{foo
+ = !s:20}'''
+
+---tokens---
+'f' Literal.String.Affix
+"'''" Literal.String.Single
+'{' Literal.String.Interpol
+'foo' Name
+'\n ' Text.Whitespace
+'= !s:' Literal.String.Interpol
+'20' Literal.String.Single
+'}' Literal.String.Interpol
+"'''" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_fstring_36b.txt b/tests/snippets/python/test_fstring_36b.txt
new file mode 100644
index 0000000..da79ce2
--- /dev/null
+++ b/tests/snippets/python/test_fstring_36b.txt
@@ -0,0 +1,16 @@
+---input---
+
+f"""{foo
+ = !s:20}"""
+
+---tokens---
+'f' Literal.String.Affix
+'"""' Literal.String.Double
+'{' Literal.String.Interpol
+'foo' Name
+'\n ' Text.Whitespace
+'= !s:' Literal.String.Interpol
+'20' Literal.String.Double
+'}' Literal.String.Interpol
+'"""' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_needs_name.txt b/tests/snippets/python/test_needs_name.txt
new file mode 100644
index 0000000..f121da0
--- /dev/null
+++ b/tests/snippets/python/test_needs_name.txt
@@ -0,0 +1,55 @@
+# Tests that '@' is recognized as an Operator
+
+---input---
+S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)
+
+---tokens---
+'S' Name
+' ' Text
+'=' Operator
+' ' Text
+'(' Punctuation
+'H' Name
+' ' Text
+'@' Operator
+' ' Text
+'beta' Name
+' ' Text
+'-' Operator
+' ' Text
+'r' Name
+')' Punctuation
+'.' Operator
+'T' Name
+' ' Text
+'@' Operator
+' ' Text
+'inv' Name
+'(' Punctuation
+'H' Name
+' ' Text
+'@' Operator
+' ' Text
+'V' Name
+' ' Text
+'@' Operator
+' ' Text
+'H' Name
+'.' Operator
+'T' Name
+')' Punctuation
+' ' Text
+'@' Operator
+' ' Text
+'(' Punctuation
+'H' Name
+' ' Text
+'@' Operator
+' ' Text
+'beta' Name
+' ' Text
+'-' Operator
+' ' Text
+'r' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_pep_515.txt b/tests/snippets/python/test_pep_515.txt
new file mode 100644
index 0000000..38fa619
--- /dev/null
+++ b/tests/snippets/python/test_pep_515.txt
@@ -0,0 +1,28 @@
+# Tests that the lexer can parse numeric literals with underscores
+
+---input---
+1_000_000
+1_000.000_001
+1_000e1_000j
+0xCAFE_F00D
+0b_0011_1111_0100_1110
+0o_777_123
+
+---tokens---
+'1_000_000' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'1_000.000_001' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1_000e1_000j' Literal.Number.Float
+'\n' Text.Whitespace
+
+'0xCAFE_F00D' Literal.Number.Hex
+'\n' Text.Whitespace
+
+'0b_0011_1111_0100_1110' Literal.Number.Bin
+'\n' Text.Whitespace
+
+'0o_777_123' Literal.Number.Oct
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_raw_fstring.txt b/tests/snippets/python/test_raw_fstring.txt
new file mode 100644
index 0000000..3381088
--- /dev/null
+++ b/tests/snippets/python/test_raw_fstring.txt
@@ -0,0 +1,46 @@
+# Tests that the lexer can parse raw f-strings
+
+---input---
+rf"m_\nu = x"
+
+f"m_\nu = {x}"
+
+rf"m_{{\nu}} = {x}"
+
+---tokens---
+'rf' Literal.String.Affix
+'"' Literal.String.Double
+'m_' Literal.String.Double
+'\\' Literal.String.Double
+'nu = x' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'f' Literal.String.Affix
+'"' Literal.String.Double
+'m_' Literal.String.Double
+'\\n' Literal.String.Escape
+'u = ' Literal.String.Double
+'{' Literal.String.Interpol
+'x' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'rf' Literal.String.Affix
+'"' Literal.String.Double
+'m_' Literal.String.Double
+'{{' Literal.String.Escape
+'\\' Literal.String.Double
+'nu' Literal.String.Double
+'}}' Literal.String.Escape
+' = ' Literal.String.Double
+'{' Literal.String.Interpol
+'x' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_string_escape_codes.txt b/tests/snippets/python/test_string_escape_codes.txt
new file mode 100644
index 0000000..c02dc03
--- /dev/null
+++ b/tests/snippets/python/test_string_escape_codes.txt
@@ -0,0 +1,20 @@
+---input---
+'\\ \n \x12 \777 \u1234 \U00010348 \N{Plus-Minus Sign}'
+
+---tokens---
+"'" Literal.String.Single
+'\\\\' Literal.String.Escape
+' ' Literal.String.Single
+'\\n' Literal.String.Escape
+' ' Literal.String.Single
+'\\x12' Literal.String.Escape
+' ' Literal.String.Single
+'\\777' Literal.String.Escape
+' ' Literal.String.Single
+'\\u1234' Literal.String.Escape
+' ' Literal.String.Single
+'\\U00010348' Literal.String.Escape
+' ' Literal.String.Single
+'\\N{Plus-Minus Sign}' Literal.String.Escape
+"'" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/python/test_walrus_operator.txt b/tests/snippets/python/test_walrus_operator.txt
new file mode 100644
index 0000000..9bab89d
--- /dev/null
+++ b/tests/snippets/python/test_walrus_operator.txt
@@ -0,0 +1,21 @@
+# Tests that ':=' is recognized as an Operator
+
+---input---
+if (a := 2) > 4:
+
+---tokens---
+'if' Keyword
+' ' Text
+'(' Punctuation
+'a' Name
+' ' Text
+':=' Operator
+' ' Text
+'2' Literal.Number.Integer
+')' Punctuation
+' ' Text
+'>' Operator
+' ' Text
+'4' Literal.Number.Integer
+':' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/python2/test_cls_builtin.txt b/tests/snippets/python2/test_cls_builtin.txt
new file mode 100644
index 0000000..ff533c2
--- /dev/null
+++ b/tests/snippets/python2/test_cls_builtin.txt
@@ -0,0 +1,34 @@
+# Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
+
+---input---
+class TestClass():
+ @classmethod
+ def hello(cls):
+ pass
+
+---tokens---
+'class' Keyword
+' ' Text
+'TestClass' Name.Class
+'(' Punctuation
+')' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text
+'@classmethod' Name.Decorator
+'\n' Text.Whitespace
+
+' ' Text
+'def' Keyword
+' ' Text
+'hello' Name.Function
+'(' Punctuation
+'cls' Name.Builtin.Pseudo
+')' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+' ' Text
+'pass' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/qbasic/test_keywords_with_dollar.txt b/tests/snippets/qbasic/test_keywords_with_dollar.txt
new file mode 100644
index 0000000..21c4fba
--- /dev/null
+++ b/tests/snippets/qbasic/test_keywords_with_dollar.txt
@@ -0,0 +1,22 @@
+---input---
+DIM x
+x = RIGHT$("abc", 1)
+
+---tokens---
+'DIM' Keyword.Declaration
+' ' Text.Whitespace
+'x' Name.Variable.Global
+'\n' Text
+
+'x' Name.Variable.Global
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'RIGHT$' Keyword.Reserved
+'(' Punctuation
+'"abc"' Literal.String.Double
+',' Punctuation
+' ' Text.Whitespace
+'1' Literal.Number.Integer.Long
+')' Punctuation
+'\n' Text
diff --git a/tests/snippets/r/test_call.txt b/tests/snippets/r/test_call.txt
new file mode 100644
index 0000000..c35a71e
--- /dev/null
+++ b/tests/snippets/r/test_call.txt
@@ -0,0 +1,12 @@
+---input---
+f(1, a)
+
+---tokens---
+'f' Name.Function
+'(' Punctuation
+'1' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'a' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_custom_operator.txt b/tests/snippets/r/test_custom_operator.txt
new file mode 100644
index 0000000..1b19df7
--- /dev/null
+++ b/tests/snippets/r/test_custom_operator.txt
@@ -0,0 +1,10 @@
+---input---
+7 % and % 8
+
+---tokens---
+'7' Literal.Number
+' ' Text.Whitespace
+'% and %' Operator
+' ' Text.Whitespace
+'8' Literal.Number
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_dot_indexing.txt b/tests/snippets/r/test_dot_indexing.txt
new file mode 100644
index 0000000..ee0871f
--- /dev/null
+++ b/tests/snippets/r/test_dot_indexing.txt
@@ -0,0 +1,9 @@
+---input---
+.[1]
+
+---tokens---
+'.' Name
+'[' Punctuation
+'1' Literal.Number
+']' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_dot_name.txt b/tests/snippets/r/test_dot_name.txt
new file mode 100644
index 0000000..01df528
--- /dev/null
+++ b/tests/snippets/r/test_dot_name.txt
@@ -0,0 +1,10 @@
+---input---
+. <- 1
+
+---tokens---
+'.' Name
+' ' Text.Whitespace
+'<-' Operator
+' ' Text.Whitespace
+'1' Literal.Number
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_indexing.txt b/tests/snippets/r/test_indexing.txt
new file mode 100644
index 0000000..6491d1a
--- /dev/null
+++ b/tests/snippets/r/test_indexing.txt
@@ -0,0 +1,9 @@
+---input---
+a[1]
+
+---tokens---
+'a' Name
+'[' Punctuation
+'1' Literal.Number
+']' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_name1.txt b/tests/snippets/r/test_name1.txt
new file mode 100644
index 0000000..a7651cb
--- /dev/null
+++ b/tests/snippets/r/test_name1.txt
@@ -0,0 +1,6 @@
+---input---
+._a_2.c
+
+---tokens---
+'._a_2.c' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_name2.txt b/tests/snippets/r/test_name2.txt
new file mode 100644
index 0000000..1570aec
--- /dev/null
+++ b/tests/snippets/r/test_name2.txt
@@ -0,0 +1,8 @@
+# Invalid names are valid if backticks are used
+
+---input---
+`.1 blah`
+
+---tokens---
+'`.1 blah`' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/r/test_name3.txt b/tests/snippets/r/test_name3.txt
new file mode 100644
index 0000000..fe5e8a4
--- /dev/null
+++ b/tests/snippets/r/test_name3.txt
@@ -0,0 +1,8 @@
+# Internal backticks can be escaped
+
+---input---
+`.1 \` blah`
+
+---tokens---
+'`.1 \\` blah`' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_escaped_bracestring.txt b/tests/snippets/ruby/test_escaped_bracestring.txt
new file mode 100644
index 0000000..14718b9
--- /dev/null
+++ b/tests/snippets/ruby/test_escaped_bracestring.txt
@@ -0,0 +1,19 @@
+---input---
+str.gsub(%r{\\\\}, "/")
+
+---tokens---
+'str' Name
+'.' Operator
+'gsub' Name
+'(' Punctuation
+'%r{' Literal.String.Regex
+'\\\\' Literal.String.Regex
+'\\\\' Literal.String.Regex
+'}' Literal.String.Regex
+',' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'/' Literal.String.Double
+'"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_interpolation_nested_curly.txt b/tests/snippets/ruby/test_interpolation_nested_curly.txt
new file mode 100644
index 0000000..f4a69f7
--- /dev/null
+++ b/tests/snippets/ruby/test_interpolation_nested_curly.txt
@@ -0,0 +1,56 @@
+---input---
+"A#{ (3..5).group_by { |x| x/2}.map do |k,v| "#{k}" end.join }" + "Z"
+
+---tokens---
+'"' Literal.String.Double
+'A' Literal.String.Double
+'#{' Literal.String.Interpol
+' ' Text.Whitespace
+'(' Punctuation
+'3' Literal.Number.Integer
+'..' Operator
+'5' Literal.Number.Integer
+')' Punctuation
+'.' Operator
+'group_by' Name
+' ' Text.Whitespace
+'{' Literal.String.Interpol
+' ' Text.Whitespace
+'|' Operator
+'x' Name
+'|' Operator
+' ' Text.Whitespace
+'x' Name
+'/' Operator
+'2' Literal.Number.Integer
+'}' Literal.String.Interpol
+'.' Operator
+'map' Name
+' ' Text.Whitespace
+'do' Keyword
+' ' Text.Whitespace
+'|' Operator
+'k' Name
+',' Punctuation
+'v' Name
+'|' Operator
+' ' Text.Whitespace
+'"' Literal.String.Double
+'#{' Literal.String.Interpol
+'k' Name
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text.Whitespace
+'end' Keyword
+'.' Operator
+'join' Name
+' ' Text.Whitespace
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'"' Literal.String.Double
+'Z' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_operator_methods.txt b/tests/snippets/ruby/test_operator_methods.txt
new file mode 100644
index 0000000..c8f7a7a
--- /dev/null
+++ b/tests/snippets/ruby/test_operator_methods.txt
@@ -0,0 +1,9 @@
+---input---
+x.==4
+
+---tokens---
+'x' Name
+'.' Operator
+'==' Name.Operator
+'4' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_range_syntax1.txt b/tests/snippets/ruby/test_range_syntax1.txt
new file mode 100644
index 0000000..f0fc15d
--- /dev/null
+++ b/tests/snippets/ruby/test_range_syntax1.txt
@@ -0,0 +1,8 @@
+---input---
+1..3
+
+---tokens---
+'1' Literal.Number.Integer
+'..' Operator
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_range_syntax2.txt b/tests/snippets/ruby/test_range_syntax2.txt
new file mode 100644
index 0000000..a3ba24a
--- /dev/null
+++ b/tests/snippets/ruby/test_range_syntax2.txt
@@ -0,0 +1,8 @@
+---input---
+1...3
+
+---tokens---
+'1' Literal.Number.Integer
+'...' Operator
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/ruby/test_range_syntax3.txt b/tests/snippets/ruby/test_range_syntax3.txt
new file mode 100644
index 0000000..08bf4b1
--- /dev/null
+++ b/tests/snippets/ruby/test_range_syntax3.txt
@@ -0,0 +1,10 @@
+---input---
+1 .. 3
+
+---tokens---
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+'..' Operator
+' ' Text.Whitespace
+'3' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/rust/test_attribute.txt b/tests/snippets/rust/test_attribute.txt
new file mode 100644
index 0000000..2c4a889
--- /dev/null
+++ b/tests/snippets/rust/test_attribute.txt
@@ -0,0 +1,12 @@
+---input---
+#[foo(bar = [baz, qux])]
+
+---tokens---
+'#[' Comment.Preproc
+'foo(bar = ' Comment.Preproc
+'[' Comment.Preproc
+'baz, qux' Comment.Preproc
+']' Comment.Preproc
+')' Comment.Preproc
+']' Comment.Preproc
+'\n' Text.Whitespace
diff --git a/tests/snippets/rust/test_break.txt b/tests/snippets/rust/test_break.txt
new file mode 100644
index 0000000..7dafde2
--- /dev/null
+++ b/tests/snippets/rust/test_break.txt
@@ -0,0 +1,39 @@
+---input---
+loop {
+ break;
+ break 'foo;
+ break'foo;
+ break_it;
+}
+
+---tokens---
+'loop' Keyword
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'break' Keyword
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'break' Keyword
+' ' Text.Whitespace
+"'foo" Name.Label
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'break' Keyword
+"'foo" Name.Label
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'break_it' Name
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/rust/test_rawstrings.txt b/tests/snippets/rust/test_rawstrings.txt
new file mode 100644
index 0000000..69524f9
--- /dev/null
+++ b/tests/snippets/rust/test_rawstrings.txt
@@ -0,0 +1,117 @@
+---input---
+fn main() {
+ let raw_str = r"Escapes don't work
+
+ here: \x3F \u{211D}";
+ println!("{}", raw_str);
+
+ // If you need quotes in a raw string, add a pair of #s
+ let quotes = r#"And then I said:
+
+ "There is no escape!""#;
+ println!("{}", quotes);
+
+ // If you need "# in your string, just use more #s in the delimiter.
+ // There is no limit for the number of #s you can use.
+ let longer_delimiter = r###"A string
+ with "# in it. And even "##!"###;
+ println!("{}", longer_delimiter);
+}
+
+---tokens---
+'fn' Keyword
+' ' Text
+'main' Name.Function
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'let' Keyword.Declaration
+' ' Text.Whitespace
+'raw_str' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'r"Escapes don\'t work\n\n here: \\x3F \\u{211D}"' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'println!' Name.Function.Magic
+'(' Punctuation
+'"' Literal.String
+'{}' Literal.String
+'"' Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'raw_str' Name
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'// If you need quotes in a raw string, add a pair of #s\n' Comment.Single
+
+' ' Text.Whitespace
+'let' Keyword.Declaration
+' ' Text.Whitespace
+'quotes' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'r#"And then I said:\n\n "There is no escape!""#' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'println!' Name.Function.Magic
+'(' Punctuation
+'"' Literal.String
+'{}' Literal.String
+'"' Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'quotes' Name
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'// If you need "# in your string, just use more #s in the delimiter.\n' Comment.Single
+
+' ' Text.Whitespace
+'// There is no limit for the number of #s you can use.\n' Comment.Single
+
+' ' Text.Whitespace
+'let' Keyword.Declaration
+' ' Text.Whitespace
+'longer_delimiter' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'r###"A string\n with "# in it. And even "##!"###' Literal.String
+';' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'println!' Name.Function.Magic
+'(' Punctuation
+'"' Literal.String
+'{}' Literal.String
+'"' Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'longer_delimiter' Name
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_colon_colon_function_name.txt b/tests/snippets/scala/test_colon_colon_function_name.txt
new file mode 100644
index 0000000..8840fff
--- /dev/null
+++ b/tests/snippets/scala/test_colon_colon_function_name.txt
@@ -0,0 +1,33 @@
+---input---
+def ::(xs: List[T]): List[T] = ::(x, xs)
+
+---tokens---
+'def' Keyword
+' ' Text.Whitespace
+'::' Name.Function
+'(' Punctuation
+'xs' Name
+':' Punctuation
+' ' Text.Whitespace
+'List' Name.Class
+'[' Punctuation
+'T' Name.Class
+']' Punctuation
+')' Punctuation
+':' Punctuation
+' ' Text.Whitespace
+'List' Name.Class
+'[' Punctuation
+'T' Name.Class
+']' Punctuation
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'::' Name
+'(' Punctuation
+'x' Name
+',' Punctuation
+' ' Text.Whitespace
+'xs' Name
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_default_parameter.txt b/tests/snippets/scala/test_default_parameter.txt
new file mode 100644
index 0000000..16ce948
--- /dev/null
+++ b/tests/snippets/scala/test_default_parameter.txt
@@ -0,0 +1,37 @@
+---input---
+def f(using y: Char = if true then 'a' else 2): Int = ???
+
+---tokens---
+'def' Keyword
+' ' Text.Whitespace
+'f' Name.Function
+'(' Punctuation
+'using' Keyword
+' ' Text.Whitespace
+'y' Name
+':' Punctuation
+' ' Text.Whitespace
+'Char' Name.Class
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'if' Keyword
+' ' Text.Whitespace
+'true' Keyword.Constant
+' ' Text.Whitespace
+'then' Keyword
+' ' Text.Whitespace
+"'a'" Literal.String.Char
+' ' Text.Whitespace
+'else' Keyword
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+')' Punctuation
+':' Punctuation
+' ' Text.Whitespace
+'Int' Name.Class
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'???' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_end_val.txt b/tests/snippets/scala/test_end_val.txt
new file mode 100644
index 0000000..43ac172
--- /dev/null
+++ b/tests/snippets/scala/test_end_val.txt
@@ -0,0 +1,8 @@
+---input---
+end val
+
+---tokens---
+'end' Keyword
+' ' Text.Whitespace
+'val' Keyword
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_end_valx.txt b/tests/snippets/scala/test_end_valx.txt
new file mode 100644
index 0000000..e1d17d7
--- /dev/null
+++ b/tests/snippets/scala/test_end_valx.txt
@@ -0,0 +1,8 @@
+---input---
+end valx
+
+---tokens---
+'end' Keyword
+' ' Text.Whitespace
+'valx' Name.Namespace
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_float_with_exponents.txt b/tests/snippets/scala/test_float_with_exponents.txt
new file mode 100644
index 0000000..d64ee63
--- /dev/null
+++ b/tests/snippets/scala/test_float_with_exponents.txt
@@ -0,0 +1,12 @@
+---input---
+.1e12 .1e+34 .1e-56 .1e12f
+
+---tokens---
+'.1e12' Literal.Number.Float
+' ' Text.Whitespace
+'.1e+34' Literal.Number.Float
+' ' Text.Whitespace
+'.1e-56' Literal.Number.Float
+' ' Text.Whitespace
+'.1e12f' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_function_operator_name.txt b/tests/snippets/scala/test_function_operator_name.txt
new file mode 100644
index 0000000..72c375d
--- /dev/null
+++ b/tests/snippets/scala/test_function_operator_name.txt
@@ -0,0 +1,18 @@
+---input---
+def < (y: String): Boolean
+
+---tokens---
+'def' Keyword
+' ' Text.Whitespace
+'<' Name.Function
+' ' Text.Whitespace
+'(' Punctuation
+'y' Name
+':' Punctuation
+' ' Text.Whitespace
+'String' Name.Class
+')' Punctuation
+':' Punctuation
+' ' Text.Whitespace
+'Boolean' Name.Class
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_import_path.txt b/tests/snippets/scala/test_import_path.txt
new file mode 100644
index 0000000..1c316a0
--- /dev/null
+++ b/tests/snippets/scala/test_import_path.txt
@@ -0,0 +1,12 @@
+---input---
+import a.b.c
+
+---tokens---
+'import' Keyword
+' ' Text.Whitespace
+'a' Name.Namespace
+'.' Punctuation
+'b' Name.Namespace
+'.' Punctuation
+'c' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_invalid_symbol_and_invalid_char.txt b/tests/snippets/scala/test_invalid_symbol_and_invalid_char.txt
new file mode 100644
index 0000000..ccaae59
--- /dev/null
+++ b/tests/snippets/scala/test_invalid_symbol_and_invalid_char.txt
@@ -0,0 +1,8 @@
+---input---
+'1 //'
+
+---tokens---
+"'" Error
+'1' Literal.Number.Integer
+' ' Text.Whitespace
+"//'\n" Comment.Single
diff --git a/tests/snippets/scala/test_open_soft_keyword.txt b/tests/snippets/scala/test_open_soft_keyword.txt
new file mode 100644
index 0000000..903be71
--- /dev/null
+++ b/tests/snippets/scala/test_open_soft_keyword.txt
@@ -0,0 +1,12 @@
+---input---
+val open = true
+
+---tokens---
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'open' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'true' Keyword.Constant
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_package_name.txt b/tests/snippets/scala/test_package_name.txt
new file mode 100644
index 0000000..53633e2
--- /dev/null
+++ b/tests/snippets/scala/test_package_name.txt
@@ -0,0 +1,11 @@
+---input---
+package p1.p2:
+
+---tokens---
+'package' Keyword
+' ' Text.Whitespace
+'p1' Name.Namespace
+'.' Punctuation
+'p2' Name
+':' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_prepend_operator.txt b/tests/snippets/scala/test_prepend_operator.txt
new file mode 100644
index 0000000..cf7b2f9
--- /dev/null
+++ b/tests/snippets/scala/test_prepend_operator.txt
@@ -0,0 +1,10 @@
+---input---
+a +: b
+
+---tokens---
+'a' Name
+' ' Text.Whitespace
+'+:' Operator
+' ' Text.Whitespace
+'b' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_qualified_name.txt b/tests/snippets/scala/test_qualified_name.txt
new file mode 100644
index 0000000..a004410
--- /dev/null
+++ b/tests/snippets/scala/test_qualified_name.txt
@@ -0,0 +1,10 @@
+---input---
+a.b.c
+
+---tokens---
+'a' Name
+'.' Punctuation
+'b' Name
+'.' Punctuation
+'c' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_qualified_name_class.txt b/tests/snippets/scala/test_qualified_name_class.txt
new file mode 100644
index 0000000..7345092
--- /dev/null
+++ b/tests/snippets/scala/test_qualified_name_class.txt
@@ -0,0 +1,10 @@
+---input---
+a.b.C
+
+---tokens---
+'a' Name
+'.' Punctuation
+'b' Name
+'.' Punctuation
+'C' Name.Class
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_script_header.txt b/tests/snippets/scala/test_script_header.txt
new file mode 100644
index 0000000..6d96fd6
--- /dev/null
+++ b/tests/snippets/scala/test_script_header.txt
@@ -0,0 +1,6 @@
+---input---
+#!/usr/bin/scala
+
+---tokens---
+'#!/usr/bin/scala' Comment.Hashbang
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_symbol_followed_by_op.txt b/tests/snippets/scala/test_symbol_followed_by_op.txt
new file mode 100644
index 0000000..518abce
--- /dev/null
+++ b/tests/snippets/scala/test_symbol_followed_by_op.txt
@@ -0,0 +1,7 @@
+---input---
+symbol*
+
+---tokens---
+'symbol' Name
+'*' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_symbol_name_ending_with_star.txt b/tests/snippets/scala/test_symbol_name_ending_with_star.txt
new file mode 100644
index 0000000..25e5dc6
--- /dev/null
+++ b/tests/snippets/scala/test_symbol_name_ending_with_star.txt
@@ -0,0 +1,6 @@
+---input---
+symbol_*
+
+---tokens---
+'symbol_*' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/scala/test_underscore_name.txt b/tests/snippets/scala/test_underscore_name.txt
new file mode 100644
index 0000000..f7a24af
--- /dev/null
+++ b/tests/snippets/scala/test_underscore_name.txt
@@ -0,0 +1,12 @@
+---input---
+val head = _head
+
+---tokens---
+'val' Keyword.Declaration
+' ' Text.Whitespace
+'head' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'_head' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/scheme/keywords.txt b/tests/snippets/scheme/keywords.txt
new file mode 100644
index 0000000..046a444
--- /dev/null
+++ b/tests/snippets/scheme/keywords.txt
@@ -0,0 +1,43 @@
+---input---
+(define* (foo #:key (bar123? 'baz))
+ (display bar123?)
+ (newline))
+
+(foo #:bar123? 'xyz)
+
+---tokens---
+'(' Punctuation
+'define*' Keyword
+' ' Text.Whitespace
+'(' Punctuation
+'foo' Name.Function
+' ' Text.Whitespace
+'#:key' Keyword.Declaration
+' ' Text.Whitespace
+'(' Punctuation
+'bar123?' Name.Function
+' ' Text.Whitespace
+"'baz" Literal.String.Symbol
+')' Punctuation
+')' Punctuation
+'\n ' Text.Whitespace
+'(' Punctuation
+'display' Name.Builtin
+' ' Text.Whitespace
+'bar123?' Name.Variable
+')' Punctuation
+'\n ' Text.Whitespace
+'(' Punctuation
+'newline' Name.Builtin
+')' Punctuation
+')' Punctuation
+'\n\n' Text.Whitespace
+
+'(' Punctuation
+'foo' Name.Function
+' ' Text.Whitespace
+'#:bar123?' Keyword.Declaration
+' ' Text.Whitespace
+"'xyz" Literal.String.Symbol
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/scheme/numbers.txt b/tests/snippets/scheme/numbers.txt
new file mode 100644
index 0000000..f03b3a7
--- /dev/null
+++ b/tests/snippets/scheme/numbers.txt
@@ -0,0 +1,169 @@
+---input---
+;; All sorts of numbers, common and less common.
+
+0
+142
+-142
++142
+-0.5
++0.5
+-0.5e-10
++0.5e10
+0.5e+10
+.5F+10
+.123
+123.
+123.L-25|54
++inf.0
+-inf.0
+#b+nan.0-inf.0i
+1-2i
+1-i
++i
+-5f24@3.14159
+4/5
+5.4e5@4
+#i5
+#o5
+#i#o5
+#o#i5
+#b01/10
+#x0f42a
+#E#b01/10
+#d#I01/10
+#i-324@3.14159
+#o#I01/1022-inf.0i
+
+;; These are not numbers but plain symbols.
+
+1+
+1-
+i
+inf
+-inf
+nan
+-nan
+
+---tokens---
+';; All sorts of numbers, common and less common.' Comment.Single
+'\n\n' Text.Whitespace
+
+'0' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'142' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'-142' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'+142' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'-0.5' Literal.Number.Float
+'\n' Text.Whitespace
+
+'+0.5' Literal.Number.Float
+'\n' Text.Whitespace
+
+'-0.5e-10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'+0.5e10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'0.5e+10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'.5F+10' Literal.Number.Float
+'\n' Text.Whitespace
+
+'.123' Literal.Number.Float
+'\n' Text.Whitespace
+
+'123.' Literal.Number.Float
+'\n' Text.Whitespace
+
+'123.L-25|54' Literal.Number.Float
+'\n' Text.Whitespace
+
+'+inf.0' Literal.Number.Float
+'\n' Text.Whitespace
+
+'-inf.0' Literal.Number.Float
+'\n' Text.Whitespace
+
+'#b+nan.0-inf.0i' Literal.Number.Bin
+'\n' Text.Whitespace
+
+'1-2i' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'1-i' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'+i' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'-5f24@3.14159' Literal.Number.Float
+'\n' Text.Whitespace
+
+'4/5' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'5.4e5@4' Literal.Number.Float
+'\n' Text.Whitespace
+
+'#i5' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'#o5' Literal.Number.Oct
+'\n' Text.Whitespace
+
+'#i#o5' Literal.Number.Oct
+'\n' Text.Whitespace
+
+'#o#i5' Literal.Number.Oct
+'\n' Text.Whitespace
+
+'#b01/10' Literal.Number.Bin
+'\n' Text.Whitespace
+
+'#x0f42a' Literal.Number.Hex
+'\n' Text.Whitespace
+
+'#E#b01/10' Literal.Number.Bin
+'\n' Text.Whitespace
+
+'#d#I01/10' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'#i-324@3.14159' Literal.Number.Float
+'\n' Text.Whitespace
+
+'#o#I01/1022-inf.0i' Literal.Number.Oct
+'\n\n' Text.Whitespace
+
+';; These are not numbers but plain symbols.' Comment.Single
+'\n\n' Text.Whitespace
+
+'1+' Name.Builtin
+'\n' Text.Whitespace
+
+'1-' Name.Builtin
+'\n' Text.Whitespace
+
+'i' Name.Variable
+'\n' Text.Whitespace
+
+'inf' Name.Builtin
+'\n' Text.Whitespace
+
+'-inf' Name.Variable
+'\n' Text.Whitespace
+
+'nan' Name.Builtin
+'\n' Text.Whitespace
+
+'-nan' Name.Variable
+'\n' Text.Whitespace
diff --git a/tests/snippets/scheme/strings.txt b/tests/snippets/scheme/strings.txt
new file mode 100644
index 0000000..9d03351
--- /dev/null
+++ b/tests/snippets/scheme/strings.txt
@@ -0,0 +1,85 @@
+---input---
+;; Test string escapes
+
+"basic string"
+
+"Strings can
+ span several
+
+ lines.
+ "
+
+"Escapes:
+ \"
+ \\
+ \|
+ \a
+ \f
+ \n
+ \r
+ \t
+ \v
+ \b
+ \0
+ \(
+ \
+ \x125612; (R6RS-style)
+ \x13 (Guile-style)
+ \u1234
+ \U123456
+"
+
+---tokens---
+';; Test string escapes' Comment.Single
+'\n\n' Text.Whitespace
+
+'"' Literal.String
+'basic string' Literal.String
+'"' Literal.String
+'\n\n' Text.Whitespace
+
+'"' Literal.String
+'Strings can\n span several\n\n lines.\n ' Literal.String
+'"' Literal.String
+'\n\n' Text.Whitespace
+
+'"' Literal.String
+'Escapes:\n ' Literal.String
+'\\"' Literal.String.Escape
+'\n ' Literal.String
+'\\\\' Literal.String.Escape
+'\n ' Literal.String
+'\\|' Literal.String.Escape
+'\n ' Literal.String
+'\\a' Literal.String.Escape
+'\n ' Literal.String
+'\\f' Literal.String.Escape
+'\n ' Literal.String
+'\\n' Literal.String.Escape
+'\n ' Literal.String
+'\\r' Literal.String.Escape
+'\n ' Literal.String
+'\\t' Literal.String.Escape
+'\n ' Literal.String
+'\\v' Literal.String.Escape
+'\n ' Literal.String
+'\\b' Literal.String.Escape
+'\n ' Literal.String
+'\\0' Literal.String.Escape
+'\n ' Literal.String
+'\\(' Literal.String.Escape
+'\n ' Literal.String
+'\\\n' Literal.String.Escape
+
+' ' Literal.String
+'\\x125612;' Literal.String.Escape
+' (R6RS-style)\n ' Literal.String
+'\\x13' Literal.String.Escape
+' (Guile-style)\n ' Literal.String
+'\\u1234' Literal.String.Escape
+'\n ' Literal.String
+'\\U123456' Literal.String.Escape
+'\n' Literal.String
+
+'"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/shell/test_array_nums.txt b/tests/snippets/shell/test_array_nums.txt
new file mode 100644
index 0000000..4e9870c
--- /dev/null
+++ b/tests/snippets/shell/test_array_nums.txt
@@ -0,0 +1,14 @@
+---input---
+a=(1 2 3)
+
+---tokens---
+'a' Name.Variable
+'=' Operator
+'(' Operator
+'1' Literal.Number
+' ' Text.Whitespace
+'2' Literal.Number
+' ' Text.Whitespace
+'3' Literal.Number
+')' Operator
+'\n' Text.Whitespace
diff --git a/tests/snippets/shell/test_curly_no_escape_and_quotes.txt b/tests/snippets/shell/test_curly_no_escape_and_quotes.txt
new file mode 100644
index 0000000..9fbb718
--- /dev/null
+++ b/tests/snippets/shell/test_curly_no_escape_and_quotes.txt
@@ -0,0 +1,15 @@
+---input---
+echo "${a//["b"]/}"
+
+---tokens---
+'echo' Name.Builtin
+' ' Text.Whitespace
+'"' Literal.String.Double
+'${' Literal.String.Interpol
+'a' Name.Variable
+'//[' Punctuation
+'"b"' Literal.String.Double
+']/' Punctuation
+'}' Literal.String.Interpol
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/shell/test_curly_with_escape.txt b/tests/snippets/shell/test_curly_with_escape.txt
new file mode 100644
index 0000000..d03b23e
--- /dev/null
+++ b/tests/snippets/shell/test_curly_with_escape.txt
@@ -0,0 +1,13 @@
+---input---
+echo ${a//[\"]/}
+
+---tokens---
+'echo' Name.Builtin
+' ' Text.Whitespace
+'${' Literal.String.Interpol
+'a' Name.Variable
+'//[' Punctuation
+'\\"' Literal.String.Escape
+']/' Punctuation
+'}' Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/shell/test_end_of_line_nums.txt b/tests/snippets/shell/test_end_of_line_nums.txt
new file mode 100644
index 0000000..663ea61
--- /dev/null
+++ b/tests/snippets/shell/test_end_of_line_nums.txt
@@ -0,0 +1,15 @@
+---input---
+a=1
+b=2 # comment
+
+---tokens---
+'a' Name.Variable
+'=' Operator
+'1' Literal.Number
+'\n' Text.Whitespace
+
+'b' Name.Variable
+'=' Operator
+'2' Literal.Number
+' ' Text.Whitespace
+'# comment\n' Comment.Single
diff --git a/tests/snippets/shell/test_parsed_single.txt b/tests/snippets/shell/test_parsed_single.txt
new file mode 100644
index 0000000..3110c8e
--- /dev/null
+++ b/tests/snippets/shell/test_parsed_single.txt
@@ -0,0 +1,8 @@
+---input---
+a=$'abc\''
+
+---tokens---
+'a' Name.Variable
+'=' Operator
+"$'abc\\''" Literal.String.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/shell/test_short_variable_names.txt b/tests/snippets/shell/test_short_variable_names.txt
new file mode 100644
index 0000000..86d4dc1
--- /dev/null
+++ b/tests/snippets/shell/test_short_variable_names.txt
@@ -0,0 +1,26 @@
+---input---
+x="$"
+y="$_"
+z="$abc"
+
+---tokens---
+'x' Name.Variable
+'=' Operator
+'"' Literal.String.Double
+'$' Text
+'"' Literal.String.Double
+'\n' Text.Whitespace
+
+'y' Name.Variable
+'=' Operator
+'"' Literal.String.Double
+'$_' Name.Variable
+'"' Literal.String.Double
+'\n' Text.Whitespace
+
+'z' Name.Variable
+'=' Operator
+'"' Literal.String.Double
+'$abc' Name.Variable
+'"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/shexc/test_prefixed_name_starting_with_number.txt b/tests/snippets/shexc/test_prefixed_name_starting_with_number.txt
new file mode 100644
index 0000000..ca1c293
--- /dev/null
+++ b/tests/snippets/shexc/test_prefixed_name_starting_with_number.txt
@@ -0,0 +1,8 @@
+---input---
+alice:6f6e4241-75a2-4780-9b2a-40da53082e54
+
+---tokens---
+'alice' Name.Namespace
+':' Punctuation
+'6f6e4241-75a2-4780-9b2a-40da53082e54' Name.Tag
+'\n' Text
diff --git a/tests/snippets/smarty/test_nested_curly.txt b/tests/snippets/smarty/test_nested_curly.txt
new file mode 100644
index 0000000..493aa49
--- /dev/null
+++ b/tests/snippets/smarty/test_nested_curly.txt
@@ -0,0 +1,18 @@
+---input---
+{templateFunction param={anotherFunction} param2=$something}
+
+---tokens---
+'{' Comment.Preproc
+'templateFunction' Name.Function
+' ' Text
+'param' Name.Attribute
+'=' Operator
+'{' Comment.Preproc
+'anotherFunction' Name.Attribute
+'}' Comment.Preproc
+' ' Text
+'param2' Name.Attribute
+'=' Operator
+'$something' Name.Variable
+'}' Comment.Preproc
+'\n' Other
diff --git a/tests/snippets/snbt/json.txt b/tests/snippets/snbt/json.txt
new file mode 100644
index 0000000..7b02134
--- /dev/null
+++ b/tests/snippets/snbt/json.txt
@@ -0,0 +1,43 @@
+---input---
+{
+ "text": "This is JSON",
+ "extra": [
+ "however",
+ "it's also technically valid SNBT.."
+ ]
+}
+
+---tokens---
+'{' Punctuation
+'\n ' Text.Whitespace
+'"' Literal.String.Double
+'text' Literal.String.Double
+'"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'This is JSON' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'"' Literal.String.Double
+'extra' Literal.String.Double
+'"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'\n ' Text.Whitespace
+'"' Literal.String.Double
+'however' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'"' Literal.String.Double
+"it's also technically valid SNBT.." Literal.String.Double
+'"' Literal.String.Double
+'\n ' Text.Whitespace
+']' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text
diff --git a/tests/snippets/snbt/literals.txt b/tests/snippets/snbt/literals.txt
new file mode 100644
index 0000000..cc852b8
--- /dev/null
+++ b/tests/snippets/snbt/literals.txt
@@ -0,0 +1,41 @@
+---input---
+{int: 1, byte: 0b, short: 1s, long: 10000L, float: 10.0f, double: 20.0}
+
+---tokens---
+'{' Punctuation
+'int' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'1' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'byte' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'0b' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'short' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'1s' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'long' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'10000L' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'float' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'10.0f' Literal.Number.Float
+',' Punctuation
+' ' Text.Whitespace
+'double' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'20.0' Literal.Number.Float
+'}' Punctuation
+'\n' Text
diff --git a/tests/snippets/snbt/multiline.txt b/tests/snippets/snbt/multiline.txt
new file mode 100644
index 0000000..35a6a53
--- /dev/null
+++ b/tests/snippets/snbt/multiline.txt
@@ -0,0 +1,56 @@
+---input---
+{
+ key: "cool compound",
+ UUID: [I;459130179,987513928,-1425663264,-175461800],
+ list: [
+ 0,
+ 1,
+ 3
+ ]
+}
+
+---tokens---
+'{' Punctuation
+'\n ' Text.Whitespace
+'key' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'cool compound' Literal.String.Double
+'"' Literal.String.Double
+',' Punctuation
+'\n ' Text.Whitespace
+'UUID' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'I' Name.Attribute
+';' Punctuation
+'459130179' Literal.Number.Integer
+',' Punctuation
+'987513928' Literal.Number.Integer
+',' Punctuation
+'-1425663264' Literal.Number.Integer
+',' Punctuation
+'-175461800' Literal.Number.Integer
+']' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'list' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'\n ' Text.Whitespace
+'0' Literal.Number.Integer
+',' Punctuation
+'\n ' Text.Whitespace
+'1' Literal.Number.Integer
+',' Punctuation
+'\n ' Text.Whitespace
+'3' Literal.Number.Integer
+'\n ' Text.Whitespace
+']' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text
diff --git a/tests/snippets/snbt/nesting.txt b/tests/snippets/snbt/nesting.txt
new file mode 100644
index 0000000..317a9c8
--- /dev/null
+++ b/tests/snippets/snbt/nesting.txt
@@ -0,0 +1,39 @@
+---input---
+{root: [{compound: 1b}, {compound: 2b, tag: {key: "value"}}]}
+
+---tokens---
+'{' Punctuation
+'root' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'{' Punctuation
+'compound' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'1b' Literal.Number.Integer
+'}' Punctuation
+',' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'compound' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'2b' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'tag' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'key' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'value' Literal.String.Double
+'"' Literal.String.Double
+'}' Punctuation
+'}' Punctuation
+']' Punctuation
+'}' Punctuation
+'\n' Text
diff --git a/tests/snippets/snbt/quoted_keys.txt b/tests/snippets/snbt/quoted_keys.txt
new file mode 100644
index 0000000..8e54f5f
--- /dev/null
+++ b/tests/snippets/snbt/quoted_keys.txt
@@ -0,0 +1,29 @@
+---input---
+{"special--key": 10b, normal_key: false, "json..///_type_key": "yup"}
+
+---tokens---
+'{' Punctuation
+'"' Literal.String.Double
+'special--key' Literal.String.Double
+'"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'10b' Literal.Number.Integer
+',' Punctuation
+' ' Text.Whitespace
+'normal_key' Name.Attribute
+':' Punctuation
+' ' Text.Whitespace
+'false' Name.Attribute
+',' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'json..///_type_key' Literal.String.Double
+'"' Literal.String.Double
+':' Punctuation
+' ' Text.Whitespace
+'"' Literal.String.Double
+'yup' Literal.String.Double
+'"' Literal.String.Double
+'}' Punctuation
+'\n' Text
diff --git a/tests/snippets/systemverilog/test_basic.txt b/tests/snippets/systemverilog/test_basic.txt
new file mode 100644
index 0000000..1f86ed0
--- /dev/null
+++ b/tests/snippets/systemverilog/test_basic.txt
@@ -0,0 +1,157 @@
+# Examine tokens emitted by the SV lexer for a trivial module.
+# Not intended to stress any particular corner of the language.
+
+---input---
+// Adder flops the sum of its inputs
+module Adder #(
+ parameter int N = 42
+) (
+ output logic [N-1:0] y,
+ output logic co,
+
+ input logic [N-1:0] a,
+ input logic [N-1:0] b,
+ input logic ci,
+
+ input logic clk
+);
+ always_ff @(posedge clk) begin
+ {co, y} <= a + b + ci;
+ end
+endmodule : Adder
+
+---tokens---
+'// Adder flops the sum of its inputs\n' Comment.Single
+
+'module' Keyword
+' ' Text.Whitespace
+'Adder' Name
+' ' Text.Whitespace
+'#' Punctuation
+'(' Punctuation
+'\n ' Text.Whitespace
+'parameter' Keyword
+' ' Text.Whitespace
+'int' Keyword.Type
+' ' Text.Whitespace
+'N' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'42' Literal.Number.Integer
+'\n' Text.Whitespace
+
+')' Punctuation
+' ' Text.Whitespace
+'(' Punctuation
+'\n ' Text.Whitespace
+'output' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'[' Punctuation
+'N' Name
+'-' Operator
+'1' Literal.Number.Integer
+':' Operator
+'0' Literal.Number.Integer
+']' Punctuation
+' ' Text.Whitespace
+'y' Name
+',' Punctuation
+'\n ' Text.Whitespace
+'output' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'co' Name
+',' Punctuation
+'\n\n ' Text.Whitespace
+'input' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'[' Punctuation
+'N' Name
+'-' Operator
+'1' Literal.Number.Integer
+':' Operator
+'0' Literal.Number.Integer
+']' Punctuation
+' ' Text.Whitespace
+'a' Name
+',' Punctuation
+'\n ' Text.Whitespace
+'input' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'[' Punctuation
+'N' Name
+'-' Operator
+'1' Literal.Number.Integer
+':' Operator
+'0' Literal.Number.Integer
+']' Punctuation
+' ' Text.Whitespace
+'b' Name
+',' Punctuation
+'\n ' Text.Whitespace
+'input' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'ci' Name
+',' Punctuation
+'\n\n ' Text.Whitespace
+'input' Keyword
+' ' Text.Whitespace
+'logic' Keyword.Type
+' ' Text.Whitespace
+'clk' Name
+'\n' Text.Whitespace
+
+')' Punctuation
+';' Punctuation
+'\n ' Text.Whitespace
+'always_ff' Keyword
+' ' Text.Whitespace
+'@' Punctuation
+'(' Punctuation
+'posedge' Keyword
+' ' Text.Whitespace
+'clk' Name
+')' Punctuation
+' ' Text.Whitespace
+'begin' Keyword
+'\n ' Text.Whitespace
+'{' Punctuation
+'co' Name
+',' Punctuation
+' ' Text.Whitespace
+'y' Name
+'}' Punctuation
+' ' Text.Whitespace
+'<' Operator
+'=' Operator
+' ' Text.Whitespace
+'a' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'b' Name
+' ' Text.Whitespace
+'+' Operator
+' ' Text.Whitespace
+'ci' Name
+';' Punctuation
+'\n ' Text.Whitespace
+'end' Keyword
+'\n' Text.Whitespace
+
+'endmodule' Keyword
+' ' Text.Whitespace
+':' Operator
+' ' Text.Whitespace
+'Adder' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/systemverilog/test_classes.txt b/tests/snippets/systemverilog/test_classes.txt
new file mode 100644
index 0000000..b9529db
--- /dev/null
+++ b/tests/snippets/systemverilog/test_classes.txt
@@ -0,0 +1,89 @@
+# Most of the interesting types of class declarations
+
+---input---
+class Foo;
+endclass
+
+class Bar;
+endclass : Bar
+
+class Fiz extends Buz;
+endclass : Fiz
+
+class Free #(parameter type T = byte) extends Beer #(T);
+endclass : Free
+
+---tokens---
+'class' Keyword.Declaration
+' ' Text.Whitespace
+'Foo' Name.Class
+';' Punctuation
+'\n' Text.Whitespace
+
+'endclass' Keyword.Declaration
+'\n\n' Text.Whitespace
+
+'class' Keyword.Declaration
+' ' Text.Whitespace
+'Bar' Name.Class
+';' Punctuation
+'\n' Text.Whitespace
+
+'endclass' Keyword.Declaration
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'Bar' Name.Class
+'\n\n' Text.Whitespace
+
+'class' Keyword.Declaration
+' ' Text.Whitespace
+'Fiz' Name.Class
+' ' Text.Whitespace
+'extends' Keyword.Declaration
+' ' Text.Whitespace
+'Buz' Name.Class
+';' Punctuation
+'\n' Text.Whitespace
+
+'endclass' Keyword.Declaration
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'Fiz' Name.Class
+'\n\n' Text.Whitespace
+
+'class' Keyword.Declaration
+' ' Text.Whitespace
+'Free' Name.Class
+' ' Text.Whitespace
+'#' Punctuation
+'(' Punctuation
+'parameter' Keyword
+' ' Text.Whitespace
+'type' Keyword.Type
+' ' Text.Whitespace
+'T' Name
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'byte' Keyword.Type
+')' Punctuation
+' ' Text.Whitespace
+'extends' Keyword.Declaration
+' ' Text.Whitespace
+'Beer' Name.Class
+' ' Text.Whitespace
+'#' Punctuation
+'(' Punctuation
+'T' Name
+')' Punctuation
+';' Punctuation
+'\n' Text.Whitespace
+
+'endclass' Keyword.Declaration
+' ' Text.Whitespace
+':' Punctuation
+' ' Text.Whitespace
+'Free' Name.Class
+'\n' Text.Whitespace
diff --git a/tests/snippets/systemverilog/test_numbers.txt b/tests/snippets/systemverilog/test_numbers.txt
new file mode 100644
index 0000000..d7e4511
--- /dev/null
+++ b/tests/snippets/systemverilog/test_numbers.txt
@@ -0,0 +1,158 @@
+# Believe it or not, SystemVerilog supports spaces before and after the base
+# specifier (i.e. 'b, 'd, 'h). See IEEE 1800-2017 Section 5.7.1 for examples.
+
+---input---
+8'b10101010
+8 'b10101010
+8'b 10101010
+8'sb10101010
+8'Sb10101010
+8'B10101010
+8'b1010_1010
+8'b10xXzZ?10
+
+24'o01234567
+24 'o01234567
+24'o 01234567
+24'so01234567
+24'So01234567
+24'O01234567
+24'o0123_4567
+24'o01xXzZ?7
+
+32'd27182818
+32 'd27182818
+32'd 27182818
+32'sd27182818
+32'Sd27182818
+32'D27182818
+32'd2718_2818
+32'd27xXzZ?8
+
+32'hdeadbeef
+32 'hdeadbeef
+32'h deadbeef
+32'shdeadbeef
+32'Shdeadbeef
+32'Hdeadbeef
+32'hdead_beef
+32'hdexXzZ?f
+
+'0 '1 'x 'X 'z 'Z
+
+42 1234_5678
+
+---tokens---
+"8'b10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8 'b10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'b 10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'sb10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'Sb10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'B10101010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'b1010_1010" Literal.Number.Bin
+'\n' Text.Whitespace
+
+"8'b10xXzZ?10" Literal.Number.Bin
+'\n\n' Text.Whitespace
+
+"24'o01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24 'o01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'o 01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'so01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'So01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'O01234567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'o0123_4567" Literal.Number.Oct
+'\n' Text.Whitespace
+
+"24'o01xXzZ?7" Literal.Number.Oct
+'\n\n' Text.Whitespace
+
+"32'd27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32 'd27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'd 27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'sd27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'Sd27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'D27182818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'd2718_2818" Literal.Number.Integer
+'\n' Text.Whitespace
+
+"32'd27xXzZ?8" Literal.Number.Integer
+'\n\n' Text.Whitespace
+
+"32'hdeadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32 'hdeadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'h deadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'shdeadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'Shdeadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'Hdeadbeef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'hdead_beef" Literal.Number.Hex
+'\n' Text.Whitespace
+
+"32'hdexXzZ?f" Literal.Number.Hex
+'\n\n' Text.Whitespace
+
+"'0" Literal.Number
+' ' Text.Whitespace
+"'1" Literal.Number
+' ' Text.Whitespace
+"'x" Literal.Number
+' ' Text.Whitespace
+"'X" Literal.Number
+' ' Text.Whitespace
+"'z" Literal.Number
+' ' Text.Whitespace
+"'Z" Literal.Number
+'\n\n' Text.Whitespace
+
+'42' Literal.Number.Integer
+' ' Text.Whitespace
+'1234_5678' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/systemverilog/test_operators.txt b/tests/snippets/systemverilog/test_operators.txt
new file mode 100644
index 0000000..fec4539
--- /dev/null
+++ b/tests/snippets/systemverilog/test_operators.txt
@@ -0,0 +1,213 @@
+# See 1800-2017 Table 11-2: Operator Precedence and Associativity
+# Note that the duplicates (unary/binary) have been removed,
+# ie '+', '-', '&', '|', '^', '~^', '^~'
+# Note: This is an inconsistent mix of operator and punctuation
+# Note: Operators would ideally be represented as one token: ':' ':' -> '::', '~' '&' -> '~&'
+
+---input---
+() [] :: .
++ - ! ~ & ~& | ~| ^ ~^ ^~ ++ --
+**
+* / %
+<< >> <<< >>>
+< <= > >= inside dist
+== != === !== ==? !=?
+&&
+||
+?:
+-> <->
+= += -= *= /= %= &= ^= |= <<= >>= <<<= >>>= := :/ <=
+{} {{}}
+
+---tokens---
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+']' Punctuation
+' ' Text.Whitespace
+':' Operator
+':' Operator
+' ' Text.Whitespace
+'.' Punctuation
+'\n' Text.Whitespace
+
+'+' Operator
+' ' Text.Whitespace
+'-' Operator
+' ' Text.Whitespace
+'!' Operator
+' ' Text.Whitespace
+'~' Operator
+' ' Text.Whitespace
+'&' Operator
+' ' Text.Whitespace
+'~' Operator
+'&' Operator
+' ' Text.Whitespace
+'|' Operator
+' ' Text.Whitespace
+'~' Operator
+'|' Operator
+' ' Text.Whitespace
+'^' Operator
+' ' Text.Whitespace
+'~' Operator
+'^' Operator
+' ' Text.Whitespace
+'^' Operator
+'~' Operator
+' ' Text.Whitespace
+'+' Operator
+'+' Operator
+' ' Text.Whitespace
+'-' Operator
+'-' Operator
+'\n' Text.Whitespace
+
+'*' Operator
+'*' Operator
+'\n' Text.Whitespace
+
+'*' Operator
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'%' Operator
+'\n' Text.Whitespace
+
+'<' Operator
+'<' Operator
+' ' Text.Whitespace
+'>' Operator
+'>' Operator
+' ' Text.Whitespace
+'<' Operator
+'<' Operator
+'<' Operator
+' ' Text.Whitespace
+'>' Operator
+'>' Operator
+'>' Operator
+'\n' Text.Whitespace
+
+'<' Operator
+' ' Text.Whitespace
+'<' Operator
+'=' Operator
+' ' Text.Whitespace
+'>' Operator
+' ' Text.Whitespace
+'>' Operator
+'=' Operator
+' ' Text.Whitespace
+'inside' Operator.Word
+' ' Text.Whitespace
+'dist' Operator.Word
+'\n' Text.Whitespace
+
+'=' Operator
+'=' Operator
+' ' Text.Whitespace
+'!' Operator
+'=' Operator
+' ' Text.Whitespace
+'=' Operator
+'=' Operator
+'=' Operator
+' ' Text.Whitespace
+'!' Operator
+'=' Operator
+'=' Operator
+' ' Text.Whitespace
+'=' Operator
+'=' Operator
+'?' Operator
+' ' Text.Whitespace
+'!' Operator
+'=' Operator
+'?' Operator
+'\n' Text.Whitespace
+
+'&' Operator
+'&' Operator
+'\n' Text.Whitespace
+
+'|' Operator
+'|' Operator
+'\n' Text.Whitespace
+
+'?' Operator
+':' Operator
+'\n' Text.Whitespace
+
+'-' Operator
+'>' Operator
+' ' Text.Whitespace
+'<' Operator
+'-' Operator
+'>' Operator
+'\n' Text.Whitespace
+
+'=' Operator
+' ' Text.Whitespace
+'+' Operator
+'=' Operator
+' ' Text.Whitespace
+'-' Operator
+'=' Operator
+' ' Text.Whitespace
+'*' Operator
+'=' Operator
+' ' Text.Whitespace
+'/' Operator
+'=' Operator
+' ' Text.Whitespace
+'%' Operator
+'=' Operator
+' ' Text.Whitespace
+'&' Operator
+'=' Operator
+' ' Text.Whitespace
+'^' Operator
+'=' Operator
+' ' Text.Whitespace
+'|' Operator
+'=' Operator
+' ' Text.Whitespace
+'<' Operator
+'<' Operator
+'=' Operator
+' ' Text.Whitespace
+'>' Operator
+'>' Operator
+'=' Operator
+' ' Text.Whitespace
+'<' Operator
+'<' Operator
+'<' Operator
+'=' Operator
+' ' Text.Whitespace
+'>' Operator
+'>' Operator
+'>' Operator
+'=' Operator
+' ' Text.Whitespace
+':' Operator
+'=' Operator
+' ' Text.Whitespace
+':' Operator
+'/' Operator
+' ' Text.Whitespace
+'<' Operator
+'=' Operator
+'\n' Text.Whitespace
+
+'{' Punctuation
+'}' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'{' Punctuation
+'}' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/tcl/test_comma_and_at.txt b/tests/snippets/tcl/test_comma_and_at.txt
new file mode 100644
index 0000000..51e1733
--- /dev/null
+++ b/tests/snippets/tcl/test_comma_and_at.txt
@@ -0,0 +1,131 @@
+---input---
+# Alt and arrow keys to scroll
+set scroll_amount 2
+bind Text <Alt-Up> {
+ %W yview scroll -$scroll_amount units
+ %W mark set insert @0,[expr [winfo height %W] / 2]
+}
+bind Text <Alt-Down> {
+ %W yview scroll $scroll_amount units
+ %W mark set insert @0,[expr [winfo height %W] / 2]
+}
+
+---tokens---
+'#' Comment
+' Alt and arrow keys to scroll\n' Comment
+
+'set' Keyword
+' ' Text.Whitespace
+'scroll_amount' Text
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+'\n' Text
+
+'bind' Name.Variable
+' ' Text.Whitespace
+'Text' Text
+' ' Text.Whitespace
+'<' Operator
+'Alt-Up' Text
+'>' Operator
+' ' Text.Whitespace
+'{' Keyword
+'\n ' Text.Whitespace
+'%' Operator
+'W' Name.Variable
+' ' Text.Whitespace
+'yview' Text
+' ' Text.Whitespace
+'scroll' Text
+' ' Text.Whitespace
+'-' Operator
+'$scroll_amount' Name.Variable
+' ' Text.Whitespace
+'units' Text
+'\n' Text
+
+' ' Text.Whitespace
+'%' Operator
+'W' Name.Variable
+' ' Text.Whitespace
+'mark' Text
+' ' Text.Whitespace
+'set' Text
+' ' Text.Whitespace
+'insert' Text
+' ' Text.Whitespace
+'@0,' Text
+'[' Keyword
+'expr' Keyword
+' ' Text.Whitespace
+'[' Keyword
+'winfo' Name.Variable
+' ' Text.Whitespace
+'height' Text
+' ' Text.Whitespace
+'%' Operator
+'W' Text
+']' Keyword
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+']' Keyword
+'\n' Text
+
+'}' Keyword
+'\n' Text
+
+'bind' Name.Variable
+' ' Text.Whitespace
+'Text' Text
+' ' Text.Whitespace
+'<' Operator
+'Alt-Down' Text
+'>' Operator
+' ' Text.Whitespace
+'{' Keyword
+'\n ' Text.Whitespace
+'%' Operator
+'W' Name.Variable
+' ' Text.Whitespace
+'yview' Text
+' ' Text.Whitespace
+'scroll' Text
+' ' Text.Whitespace
+'$scroll_amount' Name.Variable
+' ' Text.Whitespace
+'units' Text
+'\n' Text
+
+' ' Text.Whitespace
+'%' Operator
+'W' Name.Variable
+' ' Text.Whitespace
+'mark' Text
+' ' Text.Whitespace
+'set' Text
+' ' Text.Whitespace
+'insert' Text
+' ' Text.Whitespace
+'@0,' Text
+'[' Keyword
+'expr' Keyword
+' ' Text.Whitespace
+'[' Keyword
+'winfo' Name.Variable
+' ' Text.Whitespace
+'height' Text
+' ' Text.Whitespace
+'%' Operator
+'W' Text
+']' Keyword
+' ' Text.Whitespace
+'/' Operator
+' ' Text.Whitespace
+'2' Literal.Number.Integer
+']' Keyword
+'\n' Text
+
+'}' Keyword
+'\n' Text
diff --git a/tests/snippets/tcl/test_vars.txt b/tests/snippets/tcl/test_vars.txt
new file mode 100644
index 0000000..5c62390
--- /dev/null
+++ b/tests/snippets/tcl/test_vars.txt
@@ -0,0 +1,17 @@
+---input---
+set size 10; puts ${size}x${size}
+
+---tokens---
+'set' Keyword
+' ' Text.Whitespace
+'size' Text
+' ' Text.Whitespace
+'10' Literal.Number.Integer
+';' Keyword
+' ' Text.Whitespace
+'puts' Name.Builtin
+' ' Text.Whitespace
+'${size}' Name.Variable
+'x' Text
+'${size}' Name.Variable
+'\n' Text
diff --git a/tests/snippets/teal/test_comments.txt b/tests/snippets/teal/test_comments.txt
new file mode 100644
index 0000000..df7e1b1
--- /dev/null
+++ b/tests/snippets/teal/test_comments.txt
@@ -0,0 +1,28 @@
+---input---
+a//c1
+ //c2
+label://c3
+a // c4
+label: implicit comment
+
+---tokens---
+'a' Name.Function
+'//c1' Comment.Single
+'\n' Text
+
+' ' Text.Whitespace
+'//c2' Comment.Single
+'\n' Text.Whitespace
+
+'label:' Name.Function
+'//c3' Comment.Single
+'\n' Text
+
+'a' Name.Function
+' ' Text.Whitespace
+'// c4' Comment.Single
+'\n' Text
+
+'label:' Name.Label
+' implicit comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/teal/test_literals.txt b/tests/snippets/teal/test_literals.txt
new file mode 100644
index 0000000..46e43dc
--- /dev/null
+++ b/tests/snippets/teal/test_literals.txt
@@ -0,0 +1,28 @@
+---input---
+a 0x1AaAF
+a 7777777777777777777777777777777777777777777777777774MSJUVU
+a base32(aB/c23=)
+a b64 aB/c23=
+
+---tokens---
+'a' Name.Function
+' ' Text.Whitespace
+'0x1AaAF' Literal.Number.Hex
+'\n' Text
+
+'a' Name.Function
+' ' Text.Whitespace
+'7777777777777777777777777777777777777777777777777774MSJUVU' Literal.Number
+'\n' Text
+
+'a' Name.Function
+' ' Text.Whitespace
+'base32' Literal.String.Affix
+'(aB/c23=)' Literal.String.Other
+'\n' Text
+
+'a' Name.Function
+' ' Text.Whitespace
+'b64 ' Literal.String.Affix
+'aB/c23=' Literal.String.Other
+'\n' Text
diff --git a/tests/snippets/teal/test_strings.txt b/tests/snippets/teal/test_strings.txt
new file mode 100644
index 0000000..4e32633
--- /dev/null
+++ b/tests/snippets/teal/test_strings.txt
@@ -0,0 +1,15 @@
+---input---
+a "abc\x123\n\"de//f"
+
+---tokens---
+'a' Name.Function
+' ' Text.Whitespace
+'"' Literal.String
+'abc' Literal.String
+'\\x12' Literal.String.Escape
+'3' Literal.String
+'\\n' Literal.String.Escape
+'\\"' Literal.String.Escape
+'de//f' Literal.String
+'"' Literal.String
+'\n' Text
diff --git a/tests/snippets/terraform/test_attributes.txt b/tests/snippets/terraform/test_attributes.txt
new file mode 100644
index 0000000..2fcf805
--- /dev/null
+++ b/tests/snippets/terraform/test_attributes.txt
@@ -0,0 +1,155 @@
+---input---
+ description = "Some description"
+
+ availability_zones = ["${aws_instance.web.availability_zone}-foobar"]
+ availability_zones = [aws_instance.web.availability_zone]
+ assume_role_policy = data.aws_iam_policy_document.trust.json
+ policy_arn = aws_iam_policy.assume_roles[0].arn
+
+ value = file("path.txt")
+ value = jsonencode(element("value"))
+
+ tags = {
+ Name = "something"
+ }
+
+ "ENV_VARIABLE_1" = aws_dynamodb_table.loginsights2metrics.name
+ "ENV_VARIABLE_2" = "Some string"
+
+ ignore_changes = [last_modified, filename]
+
+ variable = "aws:MultiFactorAuthPresent"
+
+---tokens---
+' ' Text.Whitespace
+'description' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"Some description"' Literal.String.Double
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'availability_zones' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'"${aws_instance.web.availability_zone}-foobar"' Literal.String.Double
+']' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'availability_zones' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'aws_instance.web.availability_zone' Name.Variable
+']' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'assume_role_policy' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'data.aws_iam_policy_document.trust.json' Name.Variable
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'policy_arn' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'aws_iam_policy.assume_roles[0].arn' Name.Variable
+'\n' Text.Whitespace
+
+' \n ' Text.Whitespace
+'value' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'file' Name.Function
+'(' Punctuation
+'"path.txt"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'value' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'jsonencode' Name.Function
+'(' Punctuation
+'element' Name.Function
+'(' Punctuation
+'"value"' Literal.String.Double
+')' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'tags' Name.Builtin
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'Name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"something"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"ENV_VARIABLE_1"' Literal.String.Double
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'aws_dynamodb_table.loginsights2metrics.name' Name.Variable
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'"ENV_VARIABLE_2"' Literal.String.Double
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"Some string"' Literal.String.Double
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'ignore_changes' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'last_modified, filename' Name.Builtin
+']' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'variable' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"aws:MultiFactorAuthPresent"' Literal.String.Double
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_backend.txt b/tests/snippets/terraform/test_backend.txt
new file mode 100644
index 0000000..ff02a58
--- /dev/null
+++ b/tests/snippets/terraform/test_backend.txt
@@ -0,0 +1,44 @@
+---input---
+terraform {
+ backend "consul" {
+ address = "demo.consul.io"
+ path = "tfdocs"
+ }
+}
+
+---tokens---
+'terraform' Name.Builtin
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'backend' Keyword.Reserved
+' ' Text.Whitespace
+'"consul"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'address' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"demo.consul.io"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'path' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"tfdocs"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_comment.txt b/tests/snippets/terraform/test_comment.txt
new file mode 100644
index 0000000..9f5c1a7
--- /dev/null
+++ b/tests/snippets/terraform/test_comment.txt
@@ -0,0 +1,64 @@
+---input---
+# Single line comment
+// Non-idiomatic single line comment
+/* multiline
+
+ comment
+
+*/
+provider "azurerm" { # (1)
+ features {}
+}
+
+---tokens---
+'# Single line comment\n' Comment.Single
+
+'// Non-idiomatic single line comment\n' Comment.Single
+
+'/*' Comment.Multiline
+' ' Comment.Multiline
+'m' Comment.Multiline
+'u' Comment.Multiline
+'l' Comment.Multiline
+'t' Comment.Multiline
+'i' Comment.Multiline
+'l' Comment.Multiline
+'i' Comment.Multiline
+'n' Comment.Multiline
+'e' Comment.Multiline
+'\n' Comment.Multiline
+
+'\n' Comment.Multiline
+
+' ' Comment.Multiline
+' ' Comment.Multiline
+'c' Comment.Multiline
+'o' Comment.Multiline
+'m' Comment.Multiline
+'m' Comment.Multiline
+'e' Comment.Multiline
+'n' Comment.Multiline
+'t' Comment.Multiline
+'\n' Comment.Multiline
+
+'\n' Comment.Multiline
+
+'*/' Comment.Multiline
+'\n' Text.Whitespace
+
+'provider' Keyword.Reserved
+' ' Text.Whitespace
+'"azurerm"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+' # (1)\n' Comment.Single
+
+' ' Text.Whitespace
+'features' Name.Builtin
+' ' Text.Whitespace
+'{' Punctuation
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+' \n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_functions.txt b/tests/snippets/terraform/test_functions.txt
new file mode 100644
index 0000000..b00c7a5
--- /dev/null
+++ b/tests/snippets/terraform/test_functions.txt
@@ -0,0 +1,56 @@
+---input---
+provider "aws" {
+ value = file("path.txt")
+}
+
+provider "aws" {
+ value = jsonencode(element("value"))
+}
+
+---tokens---
+'provider' Keyword.Reserved
+' ' Text.Whitespace
+'"aws"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'value' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'file' Name.Function
+'(' Punctuation
+'"path.txt"' Literal.String.Double
+')' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'provider' Keyword.Reserved
+' ' Text.Whitespace
+'"aws"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'value' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'jsonencode' Name.Function
+'(' Punctuation
+'element' Name.Function
+'(' Punctuation
+'"value"' Literal.String.Double
+')' Punctuation
+')' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_heredoc.txt b/tests/snippets/terraform/test_heredoc.txt
new file mode 100644
index 0000000..a8b1cb3
--- /dev/null
+++ b/tests/snippets/terraform/test_heredoc.txt
@@ -0,0 +1,65 @@
+---input---
+resource "local_file" "heredoc" {
+ content = <<-DOC
+ heredoc content
+ DOC
+}
+
+resource "local_file" "heredoc" {
+ content = <<DOC
+ heredoc content
+ DOC
+}
+
+---tokens---
+'resource' Keyword.Reserved
+' ' Text.Whitespace
+'"local_file"' Name.Class
+' ' Text.Whitespace
+'"heredoc"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'content' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'<<-' Operator
+'DOC' Literal.String.Delimiter
+'\n' Literal.String.Heredoc
+
+' heredoc content\n' Literal.String.Heredoc
+
+' DOC\n' Literal.String.Delimiter
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'resource' Keyword.Reserved
+' ' Text.Whitespace
+'"local_file"' Name.Class
+' ' Text.Whitespace
+'"heredoc"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'content' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'<<' Operator
+'DOC' Literal.String.Delimiter
+'\n' Literal.String.Heredoc
+
+' heredoc content\n' Literal.String.Heredoc
+
+' DOC\n' Literal.String.Delimiter
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_module.txt b/tests/snippets/terraform/test_module.txt
new file mode 100644
index 0000000..5a5876b
--- /dev/null
+++ b/tests/snippets/terraform/test_module.txt
@@ -0,0 +1,32 @@
+---input---
+module "consul" {
+ source = "hashicorp/consul/aws"
+ servers = 3
+}
+
+---tokens---
+'module' Keyword.Reserved
+' ' Text.Whitespace
+'"consul"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'source' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"hashicorp/consul/aws"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'servers' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'3' Literal.Number
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_resource.txt b/tests/snippets/terraform/test_resource.txt
new file mode 100644
index 0000000..7b2815a
--- /dev/null
+++ b/tests/snippets/terraform/test_resource.txt
@@ -0,0 +1,211 @@
+---input---
+resource "aws_internet_gateway" "base_igw" {
+ vpc_id = aws_vpc.something.id
+ tags = {
+ Name = "igw-${var.something}-${var.something}"
+ }
+}
+
+resource "aws_security_group" "allow_tls" {
+ name = "allow_tls"
+ description = "Allow TLS inbound traffic"
+ vpc_id = aws_vpc.main.id
+
+ # Ingress rules
+ ingress {
+ description = "TLS from VPC"
+ from_port = 443
+ to_port = 443
+ }
+
+ # Egress rules
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Name = "allow_tls"
+ }
+}
+
+---tokens---
+'resource' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_internet_gateway"' Name.Class
+' ' Text.Whitespace
+'"base_igw"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'vpc_id' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'aws_vpc.something.id' Name.Variable
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'tags' Name.Builtin
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'Name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"igw-${var.something}-${var.something}"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'resource' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_security_group"' Name.Class
+' ' Text.Whitespace
+'"allow_tls"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"allow_tls"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'description' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"Allow TLS inbound traffic"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'vpc_id' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'aws_vpc.main.id' Name.Variable
+'\n\n # Ingress rules\n' Comment.Single
+
+' ' Text.Whitespace
+'ingress' Name.Builtin
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'description' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"TLS from VPC"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'from_port' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'443' Literal.Number
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'to_port' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'443' Literal.Number
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n\n # Egress rules\n' Comment.Single
+
+' ' Text.Whitespace
+'egress' Name.Builtin
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'from_port' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'0' Literal.Number
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'to_port' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'0' Literal.Number
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'protocol' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"-1"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'cidr_blocks' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'"0.0.0.0/0"' Literal.String.Double
+']' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'tags' Name.Builtin
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'Name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"allow_tls"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_types.txt b/tests/snippets/terraform/test_types.txt
new file mode 100644
index 0000000..b689244
--- /dev/null
+++ b/tests/snippets/terraform/test_types.txt
@@ -0,0 +1,94 @@
+---input---
+backend "consul" {
+data "aws_ami" "example" {
+module "consul" {
+output "instance_ip_addr" {
+provider "aws" {
+provisioner "local-exec" {
+resource "aws_internet_gateway" "base_igw" {
+variable "aws_region" {
+variable "set-str" {
+ type = set(string)
+}
+
+---tokens---
+'backend' Keyword.Reserved
+' ' Text.Whitespace
+'"consul"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'data' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_ami"' Name.Class
+' ' Text.Whitespace
+'"example"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'module' Keyword.Reserved
+' ' Text.Whitespace
+'"consul"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'output' Keyword.Reserved
+' ' Text.Whitespace
+'"instance_ip_addr"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'provider' Keyword.Reserved
+' ' Text.Whitespace
+'"aws"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'provisioner' Keyword.Reserved
+' ' Text.Whitespace
+'"local-exec"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'resource' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_internet_gateway"' Name.Class
+' ' Text.Whitespace
+'"base_igw"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'variable' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_region"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'variable' Keyword.Reserved
+' ' Text.Whitespace
+'"set-str"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'type' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'set' Keyword.Type
+'(' Punctuation
+'string' Keyword.Type
+')' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_variable_declaration.txt b/tests/snippets/terraform/test_variable_declaration.txt
new file mode 100644
index 0000000..72e515e
--- /dev/null
+++ b/tests/snippets/terraform/test_variable_declaration.txt
@@ -0,0 +1,41 @@
+---input---
+variable "aws_region" {
+ description = "AWS region to launch servers."
+ default = "us-west-2"
+ somevar = true
+}
+
+---tokens---
+'variable' Keyword.Reserved
+' ' Text.Whitespace
+'"aws_region"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'description' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"AWS region to launch servers."' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'default' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"us-west-2"' Literal.String.Double
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'somevar' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'true' Name.Constant
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/terraform/test_variable_read.txt b/tests/snippets/terraform/test_variable_read.txt
new file mode 100644
index 0000000..25afcf4
--- /dev/null
+++ b/tests/snippets/terraform/test_variable_read.txt
@@ -0,0 +1,23 @@
+---input---
+provider "aws" {
+ region = var.aws_region
+}
+
+---tokens---
+'provider' Keyword.Reserved
+' ' Text.Whitespace
+'"aws"' Name.Variable
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+' ' Text.Whitespace
+'region' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'var.aws_region' Name.Variable
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/turtle/test_prefixed_name_starting_with_number.txt b/tests/snippets/turtle/test_prefixed_name_starting_with_number.txt
new file mode 100644
index 0000000..ca1c293
--- /dev/null
+++ b/tests/snippets/turtle/test_prefixed_name_starting_with_number.txt
@@ -0,0 +1,8 @@
+---input---
+alice:6f6e4241-75a2-4780-9b2a-40da53082e54
+
+---tokens---
+'alice' Name.Namespace
+':' Punctuation
+'6f6e4241-75a2-4780-9b2a-40da53082e54' Name.Tag
+'\n' Text
diff --git a/tests/snippets/typescript/test_function_definition.txt b/tests/snippets/typescript/test_function_definition.txt
new file mode 100644
index 0000000..925c380
--- /dev/null
+++ b/tests/snippets/typescript/test_function_definition.txt
@@ -0,0 +1,18 @@
+---input---
+async function main() {
+}
+
+---tokens---
+'async' Keyword
+' ' Text.Whitespace
+'function' Keyword.Declaration
+' ' Text.Whitespace
+'main' Name.Other
+'(' Punctuation
+')' Punctuation
+' ' Text.Whitespace
+'{' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/unixconfig/etc_group.txt b/tests/snippets/unixconfig/etc_group.txt
new file mode 100644
index 0000000..3294ed5
--- /dev/null
+++ b/tests/snippets/unixconfig/etc_group.txt
@@ -0,0 +1,45 @@
+---input---
+root:x:0:
+sudo:x:1:syslog,user
+syslog:x:2:
+#adm:x:3:
+
+user:x:1000
+
+---tokens---
+'root' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'\n' Text.Whitespace
+
+'sudo' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'1' Literal.Number
+':' Punctuation
+'syslog,user' Text
+'\n' Text.Whitespace
+
+'syslog' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'2' Literal.Number
+':' Punctuation
+'\n' Text.Whitespace
+
+'#adm:x:3:' Comment
+'\n' Text.Whitespace
+
+'\n' Text.Whitespace
+
+'user' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'1000' Literal.Number
+'\n' Text.Whitespace
diff --git a/tests/snippets/unixconfig/etc_passwd.txt b/tests/snippets/unixconfig/etc_passwd.txt
new file mode 100644
index 0000000..540e41f
--- /dev/null
+++ b/tests/snippets/unixconfig/etc_passwd.txt
@@ -0,0 +1,86 @@
+---input---
+root:x:0:0:root:/root:/bin/bash
+daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
+#irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin
+gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
+nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
+systemd-network:x:100:102:systemd Network Management,,,:/run/systemd:/usr/sbin/nologin
+
+---tokens---
+'root' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'root' Text
+':' Punctuation
+'/root' Literal.String
+':' Punctuation
+'/bin/bash' Literal.String
+'\n' Text.Whitespace
+
+'daemon' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'1' Literal.Number
+':' Punctuation
+'1' Literal.Number
+':' Punctuation
+'daemon' Text
+':' Punctuation
+'/usr/sbin' Literal.String
+':' Punctuation
+'/usr/sbin/nologin' Literal.String
+'\n' Text.Whitespace
+
+'#irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin' Comment
+'\n' Text.Whitespace
+
+'gnats' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'41' Literal.Number
+':' Punctuation
+'41' Literal.Number
+':' Punctuation
+'Gnats Bug-Reporting System (admin)' Text
+':' Punctuation
+'/var/lib/gnats' Literal.String
+':' Punctuation
+'/usr/sbin/nologin' Literal.String
+'\n' Text.Whitespace
+
+'nobody' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'65534' Literal.Number
+':' Punctuation
+'65534' Literal.Number
+':' Punctuation
+'nobody' Text
+':' Punctuation
+'/nonexistent' Literal.String
+':' Punctuation
+'/usr/sbin/nologin' Literal.String
+'\n' Text.Whitespace
+
+'systemd-network' Text
+':' Punctuation
+'x' Literal.String
+':' Punctuation
+'100' Literal.Number
+':' Punctuation
+'102' Literal.Number
+':' Punctuation
+'systemd Network Management,,,' Text
+':' Punctuation
+'/run/systemd' Literal.String
+':' Punctuation
+'/usr/sbin/nologin' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/unixconfig/etc_shadow.txt b/tests/snippets/unixconfig/etc_shadow.txt
new file mode 100644
index 0000000..6b1d92a
--- /dev/null
+++ b/tests/snippets/unixconfig/etc_shadow.txt
@@ -0,0 +1,74 @@
+---input---
+root:$6$L95fNbtS$IZ8affe7h2B.DF81HZ:17262:0:14600:14:::
+#nobody:*:18375:0:99999:7:::
+bin:*:17110:0:99999:7:::
+user:$6$KmghZnvbZs7f3SQ9$H6f0M61q5Cf8JLrS0kR3M97/o6GzD6FH3MbLs92CM/l9mHZ7FngBzRfa8D5NrWl.K8nM64affeWrY/L0U7nBt/:19097:0:99999:7:::
+linoadmin:!!:17289:0:99999:7:::
+
+---tokens---
+'root' Text
+':' Punctuation
+'$6$L95fNbtS$IZ8affe7h2B.DF81HZ' Literal.String
+':' Punctuation
+'17262' Literal.Number
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'14600' Literal.Number
+':' Punctuation
+'14' Literal.Number
+':' Punctuation
+':' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+'#nobody:*:18375:0:99999:7:::' Comment
+'\n' Text.Whitespace
+
+'bin' Text
+':' Punctuation
+'*' Literal.String
+':' Punctuation
+'17110' Literal.Number
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'99999' Literal.Number
+':' Punctuation
+'7' Literal.Number
+':' Punctuation
+':' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+'user' Text
+':' Punctuation
+'$6$KmghZnvbZs7f3SQ9$H6f0M61q5Cf8JLrS0kR3M97/o6GzD6FH3MbLs92CM/l9mHZ7FngBzRfa8D5NrWl.K8nM64affeWrY/L0U7nBt/' Literal.String
+':' Punctuation
+'19097' Literal.Number
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'99999' Literal.Number
+':' Punctuation
+'7' Literal.Number
+':' Punctuation
+':' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
+
+'linoadmin' Text
+':' Punctuation
+'!!' Literal.String
+':' Punctuation
+'17289' Literal.Number
+':' Punctuation
+'0' Literal.Number
+':' Punctuation
+'99999' Literal.Number
+':' Punctuation
+'7' Literal.Number
+':' Punctuation
+':' Punctuation
+':' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_attribute.txt b/tests/snippets/usd/test_attribute.txt
new file mode 100644
index 0000000..74e6789
--- /dev/null
+++ b/tests/snippets/usd/test_attribute.txt
@@ -0,0 +1,174 @@
+---input---
+double foo = 8.0
+
+custom double foo = 8.0
+
+uniform double foo = 8.0
+
+custom uniform double foo = 8.0
+
+custom double foo_underscore_name = 8.0
+
+double[] foo_underscore_name = [10.1, 12.0, 13]
+
+double[] primvar:foo_thing = [10.1, 12.0, 13]
+
+custom int[] foo = [8, 10, 14]
+
+custom int[] foo.timeSamples = {
+ 1: [8, 0, 14],
+ 2: [-8, 0, 14],
+}
+
+---tokens---
+'double' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'8.0' Literal.Number
+'\n\n' Text.Whitespace
+
+'custom' Keyword.Token
+' ' Text.Whitespace
+'double' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'8.0' Literal.Number
+'\n\n' Text.Whitespace
+
+'uniform' Keyword.Token
+' ' Text.Whitespace
+'double' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'8.0' Literal.Number
+'\n\n' Text.Whitespace
+
+'custom' Keyword.Token
+' ' Text.Whitespace
+'uniform' Keyword.Token
+' ' Text.Whitespace
+'double' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'8.0' Literal.Number
+'\n\n' Text.Whitespace
+
+'custom' Keyword.Token
+' ' Text.Whitespace
+'double' Keyword.Type
+' ' Text.Whitespace
+'foo_underscore_name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'8.0' Literal.Number
+'\n\n' Text.Whitespace
+
+'double[]' Keyword.Type
+' ' Text.Whitespace
+'foo_underscore_name' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'10.1' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'12.0' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'13' Literal.Number
+']' Punctuation
+'\n\n' Text.Whitespace
+
+'double[]' Keyword.Type
+' ' Text.Whitespace
+'primvar:foo_thing' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'10.1' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'12.0' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'13' Literal.Number
+']' Punctuation
+'\n\n' Text.Whitespace
+
+'custom' Keyword.Token
+' ' Text.Whitespace
+'int[]' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'8' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'10' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'14' Literal.Number
+']' Punctuation
+'\n\n' Text.Whitespace
+
+'custom' Keyword.Token
+' ' Text.Whitespace
+'int[]' Keyword.Type
+' ' Text.Whitespace
+'foo' Name.Attribute
+'.' Text
+'timeSamples' Name.Keyword.Tokens
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'1' Literal.Number
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'8' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'14' Literal.Number
+']' Punctuation
+',' Punctuation
+'\n ' Text.Whitespace
+'2' Literal.Number
+':' Punctuation
+' ' Text.Whitespace
+'[' Punctuation
+'-8' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'0' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'14' Literal.Number
+']' Punctuation
+',' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_composition_arcs.txt b/tests/snippets/usd/test_composition_arcs.txt
new file mode 100644
index 0000000..0270c93
--- /dev/null
+++ b/tests/snippets/usd/test_composition_arcs.txt
@@ -0,0 +1,101 @@
+---input---
+def Xform "BottleMedical" (
+ kind = "prop"
+ payload = @./BottleMedical_payload.usd@</BottleMedical>
+ variants = {
+ string modelingVariant = "LiquidBottleLg"
+ string shadingComplexity = "full"
+ }
+ add variantSets = ["modelingVariant", "shadingComplexity"]
+)
+{
+ variantSet "modelingVariant" = {
+ "ALL_VARIANTS" {
+ }
+ }
+}
+
+---tokens---
+'def' Keyword.Tokens
+' ' Text.Whitespace
+'Xform' Text
+' ' Text.Whitespace
+'"BottleMedical"' Literal.String
+' ' Text.Whitespace
+'(' Punctuation
+'\n ' Text.Whitespace
+'kind' Name.Builtins
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"prop"' Literal.String
+'\n ' Text.Whitespace
+'payload' Keyword.Tokens
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'@./BottleMedical_payload.usd@' Literal.String.Interpol
+'</BottleMedical>' Name.Namespace
+'\n ' Text.Whitespace
+'variants' Keyword.Tokens
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'string' Keyword.Type
+' ' Text.Whitespace
+'modelingVariant' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"LiquidBottleLg"' Literal.String
+'\n ' Text.Whitespace
+'string' Keyword.Type
+' ' Text.Whitespace
+'shadingComplexity' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"full"' Literal.String
+'\n ' Text.Whitespace
+'}' Punctuation
+'\n ' Text.Whitespace
+'add' Keyword.Type
+' ' Text.Whitespace
+'variantSets' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'"modelingVariant"' Literal.String
+',' Punctuation
+' ' Text.Whitespace
+'"shadingComplexity"' Literal.String
+']' Punctuation
+'\n' Text.Whitespace
+
+')' Punctuation
+'\n' Text.Whitespace
+
+'{' Punctuation
+'\n ' Text.Whitespace
+'variantSet' Keyword.Tokens
+' ' Text.Whitespace
+'"modelingVariant"' Literal.String
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'"ALL_VARIANTS"' Literal.String
+' ' Text.Whitespace
+'{' Punctuation
+'\n ' Text.Whitespace
+'}' Punctuation
+'\n ' Text.Whitespace
+'}' Punctuation
+'\n' Text.Whitespace
+
+'}' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_metadata.txt b/tests/snippets/usd/test_metadata.txt
new file mode 100644
index 0000000..edc4614
--- /dev/null
+++ b/tests/snippets/usd/test_metadata.txt
@@ -0,0 +1,36 @@
+# Make sure metadata [the stuff inside ()s] doesn't match as Attributes.
+
+---input---
+float[] primvars:skel:jointWeights = [1] (
+ elementSize = 1
+ interpolation = "constant"
+)
+
+---tokens---
+'float[]' Keyword.Type
+' ' Text.Whitespace
+'primvars:skel:jointWeights' Name.Attribute
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'[' Punctuation
+'1' Literal.Number
+']' Punctuation
+' ' Text.Whitespace
+'(' Punctuation
+'\n ' Text.Whitespace
+'elementSize' Name.Builtins
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'1' Literal.Number
+'\n ' Text.Whitespace
+'interpolation' Name.Builtins
+' ' Text.Whitespace
+'=' Operator
+' ' Text.Whitespace
+'"constant"' Literal.String
+'\n' Text.Whitespace
+
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_numbers.txt b/tests/snippets/usd/test_numbers.txt
new file mode 100644
index 0000000..a4e0f88
--- /dev/null
+++ b/tests/snippets/usd/test_numbers.txt
@@ -0,0 +1,21 @@
+---input---
+8 8.0123312132, -4 -14.123 1e10 0.1e10 10.123e+10 0.123e-14
+
+---tokens---
+'8' Literal.Number
+' ' Text.Whitespace
+'8.0123312132' Literal.Number
+',' Punctuation
+' ' Text.Whitespace
+'-4' Literal.Number
+' ' Text.Whitespace
+'-14.123' Literal.Number
+' ' Text.Whitespace
+'1e10' Literal.Number
+' ' Text.Whitespace
+'0.1e10' Literal.Number
+' ' Text.Whitespace
+'10.123e+10' Literal.Number
+' ' Text.Whitespace
+'0.123e-14' Literal.Number
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_outer_match_at_sign.txt b/tests/snippets/usd/test_outer_match_at_sign.txt
new file mode 100644
index 0000000..de0bc72
--- /dev/null
+++ b/tests/snippets/usd/test_outer_match_at_sign.txt
@@ -0,0 +1,14 @@
+# Make sure that text located between quotes and @@s is not matched.
+
+---input---
+@firststring@ something else @secondstring@
+
+---tokens---
+'@firststring@' Literal.String.Interpol
+' ' Text.Whitespace
+'something' Text
+' ' Text.Whitespace
+'else' Text
+' ' Text.Whitespace
+'@secondstring@' Literal.String.Interpol
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_outer_match_double.txt b/tests/snippets/usd/test_outer_match_double.txt
new file mode 100644
index 0000000..773da13
--- /dev/null
+++ b/tests/snippets/usd/test_outer_match_double.txt
@@ -0,0 +1,12 @@
+---input---
+'firststring' something else 'secondstring'
+
+---tokens---
+"'firststring'" Literal.String
+' ' Text.Whitespace
+'something' Text
+' ' Text.Whitespace
+'else' Text
+' ' Text.Whitespace
+"'secondstring'" Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_outer_match_single.txt b/tests/snippets/usd/test_outer_match_single.txt
new file mode 100644
index 0000000..773da13
--- /dev/null
+++ b/tests/snippets/usd/test_outer_match_single.txt
@@ -0,0 +1,12 @@
+---input---
+'firststring' something else 'secondstring'
+
+---tokens---
+"'firststring'" Literal.String
+' ' Text.Whitespace
+'something' Text
+' ' Text.Whitespace
+'else' Text
+' ' Text.Whitespace
+"'secondstring'" Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_string_multiple_line.txt b/tests/snippets/usd/test_string_multiple_line.txt
new file mode 100644
index 0000000..b3ab1fd
--- /dev/null
+++ b/tests/snippets/usd/test_string_multiple_line.txt
@@ -0,0 +1,20 @@
+---input---
+"""
+Some text multiline
+"""
+
+"""Some text multiline
+"""
+
+"""
+Some text multiline"""
+
+---tokens---
+'"""\nSome text multiline\n"""' Literal.String
+'\n\n' Text.Whitespace
+
+'"""Some text multiline\n"""' Literal.String
+'\n\n' Text.Whitespace
+
+'"""\nSome text multiline"""' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_string_priority.txt b/tests/snippets/usd/test_string_priority.txt
new file mode 100644
index 0000000..481a416
--- /dev/null
+++ b/tests/snippets/usd/test_string_priority.txt
@@ -0,0 +1,10 @@
+# Make sure that no other rules override a string match.
+
+---input---
+"""
+custom int[] foo = [8, 10, 14]
+"""
+
+---tokens---
+'"""\ncustom int[] foo = [8, 10, 14]\n"""' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/usd/test_string_single_line.txt b/tests/snippets/usd/test_string_single_line.txt
new file mode 100644
index 0000000..b5058ce
--- /dev/null
+++ b/tests/snippets/usd/test_string_single_line.txt
@@ -0,0 +1,6 @@
+---input---
+"Some 'text"
+
+---tokens---
+'"Some \'text"' Literal.String
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_floats.txt b/tests/snippets/vbscript/test_floats.txt
new file mode 100644
index 0000000..9493a3b
--- /dev/null
+++ b/tests/snippets/vbscript/test_floats.txt
@@ -0,0 +1,34 @@
+---input---
+1.
+1.e1
+.1
+1.2
+1.2e3
+1.2e+3
+1.2e-3
+1e2
+
+---tokens---
+'1.' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.e1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'.1' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.2' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.2e3' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.2e+3' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1.2e-3' Literal.Number.Float
+'\n' Text.Whitespace
+
+'1e2' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_floats_multiple.txt b/tests/snippets/vbscript/test_floats_multiple.txt
new file mode 100644
index 0000000..30a3708
--- /dev/null
+++ b/tests/snippets/vbscript/test_floats_multiple.txt
@@ -0,0 +1,7 @@
+---input---
+1e2.1e2
+
+---tokens---
+'1e2' Literal.Number.Float
+'.1e2' Literal.Number.Float
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_integers.txt b/tests/snippets/vbscript/test_integers.txt
new file mode 100644
index 0000000..132ef7e
--- /dev/null
+++ b/tests/snippets/vbscript/test_integers.txt
@@ -0,0 +1,14 @@
+---input---
+1
+23
+456
+
+---tokens---
+'1' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'23' Literal.Number.Integer
+'\n' Text.Whitespace
+
+'456' Literal.Number.Integer
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_invalid_character.txt b/tests/snippets/vbscript/test_invalid_character.txt
new file mode 100644
index 0000000..6a1e6f1
--- /dev/null
+++ b/tests/snippets/vbscript/test_invalid_character.txt
@@ -0,0 +1,10 @@
+---input---
+a;bc
+d
+
+---tokens---
+'a' Name
+';bc\n' Error
+
+'d' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_names.txt b/tests/snippets/vbscript/test_names.txt
new file mode 100644
index 0000000..404844f
--- /dev/null
+++ b/tests/snippets/vbscript/test_names.txt
@@ -0,0 +1,18 @@
+---input---
+thingy
+thingy123
+_thingy
+_123
+
+---tokens---
+'thingy' Name
+'\n' Text.Whitespace
+
+'thingy123' Name
+'\n' Text.Whitespace
+
+'_thingy' Name
+'\n' Text.Whitespace
+
+'_123' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_reject_almost_float.txt b/tests/snippets/vbscript/test_reject_almost_float.txt
new file mode 100644
index 0000000..478e6a3
--- /dev/null
+++ b/tests/snippets/vbscript/test_reject_almost_float.txt
@@ -0,0 +1,7 @@
+---input---
+.e1
+
+---tokens---
+'.' Punctuation
+'e1' Name
+'\n' Text.Whitespace
diff --git a/tests/snippets/vbscript/test_unterminated_string.txt b/tests/snippets/vbscript/test_unterminated_string.txt
new file mode 100644
index 0000000..e92060b
--- /dev/null
+++ b/tests/snippets/vbscript/test_unterminated_string.txt
@@ -0,0 +1,7 @@
+---input---
+"x\nx
+
+---tokens---
+'"' Literal.String.Double
+'x\\nx' Literal.String.Double
+'\n' Error
diff --git a/tests/snippets/wat/test_align_and_offset_accept_hexadecimal_numbers.txt b/tests/snippets/wat/test_align_and_offset_accept_hexadecimal_numbers.txt
new file mode 100644
index 0000000..919e1d2
--- /dev/null
+++ b/tests/snippets/wat/test_align_and_offset_accept_hexadecimal_numbers.txt
@@ -0,0 +1,14 @@
+---input---
+i32.store offset=0xdeadbeef align=0x1
+
+---tokens---
+'i32.store' Name.Builtin
+' ' Text
+'offset' Keyword
+'=' Operator
+'0xdeadbeef' Literal.Number.Hex
+' ' Text
+'align' Keyword
+'=' Operator
+'0x1' Literal.Number.Hex
+'\n' Text
diff --git a/tests/snippets/wat/test_comment_with_open_paren.txt b/tests/snippets/wat/test_comment_with_open_paren.txt
new file mode 100644
index 0000000..631de4c
--- /dev/null
+++ b/tests/snippets/wat/test_comment_with_open_paren.txt
@@ -0,0 +1,10 @@
+---input---
+(; comment with ( open paren ;)
+
+---tokens---
+'(;' Comment.Multiline
+' comment with ' Comment.Multiline
+'(' Comment.Multiline
+' open paren ' Comment.Multiline
+';)' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/wat/test_comment_with_semicolon.txt b/tests/snippets/wat/test_comment_with_semicolon.txt
new file mode 100644
index 0000000..0cd3112
--- /dev/null
+++ b/tests/snippets/wat/test_comment_with_semicolon.txt
@@ -0,0 +1,10 @@
+---input---
+(; comment with ; semicolon ;)
+
+---tokens---
+'(;' Comment.Multiline
+' comment with ' Comment.Multiline
+';' Comment.Multiline
+' semicolon ' Comment.Multiline
+';)' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/wat/test_i32_const_is_builtin.txt b/tests/snippets/wat/test_i32_const_is_builtin.txt
new file mode 100644
index 0000000..740907c
--- /dev/null
+++ b/tests/snippets/wat/test_i32_const_is_builtin.txt
@@ -0,0 +1,6 @@
+---input---
+i32.const
+
+---tokens---
+'i32.const' Name.Builtin
+'\n' Text
diff --git a/tests/snippets/wat/test_multiline_comment.txt b/tests/snippets/wat/test_multiline_comment.txt
new file mode 100644
index 0000000..6cbd45e
--- /dev/null
+++ b/tests/snippets/wat/test_multiline_comment.txt
@@ -0,0 +1,11 @@
+---input---
+(;
+ comment
+;)
+
+---tokens---
+'(;' Comment.Multiline
+'\n comment\n' Comment.Multiline
+
+';)' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/wat/test_nested_comment.txt b/tests/snippets/wat/test_nested_comment.txt
new file mode 100644
index 0000000..de07293
--- /dev/null
+++ b/tests/snippets/wat/test_nested_comment.txt
@@ -0,0 +1,14 @@
+---input---
+(;
+nested(;;)comment
+;)
+
+---tokens---
+'(;' Comment.Multiline
+'\nnested' Comment.Multiline
+'(;' Comment.Multiline
+';)' Comment.Multiline
+'comment\n' Comment.Multiline
+
+';)' Comment.Multiline
+'\n' Text
diff --git a/tests/snippets/wat/test_string_byte_escape.txt b/tests/snippets/wat/test_string_byte_escape.txt
new file mode 100644
index 0000000..c0b9e4a
--- /dev/null
+++ b/tests/snippets/wat/test_string_byte_escape.txt
@@ -0,0 +1,9 @@
+---input---
+"\001"
+
+---tokens---
+'"' Literal.String.Double
+'\\00' Literal.String.Escape
+'1' Literal.String.Double
+'"' Literal.String.Double
+'\n' Text
diff --git a/tests/snippets/wat/test_string_with_escape.txt b/tests/snippets/wat/test_string_with_escape.txt
new file mode 100644
index 0000000..c978faa
--- /dev/null
+++ b/tests/snippets/wat/test_string_with_escape.txt
@@ -0,0 +1,9 @@
+---input---
+"string\t"
+
+---tokens---
+'"' Literal.String.Double
+'string' Literal.String.Double
+'\\t' Literal.String.Escape
+'"' Literal.String.Double
+'\n' Text
diff --git a/tests/snippets/wat/test_variable_name_pattern.txt b/tests/snippets/wat/test_variable_name_pattern.txt
new file mode 100644
index 0000000..d305ab9
--- /dev/null
+++ b/tests/snippets/wat/test_variable_name_pattern.txt
@@ -0,0 +1,6 @@
+---input---
+$ABCabc123!#$%&'*+./:<=>?@\\^_`|~-A
+
+---tokens---
+"$ABCabc123!#$%&'*+./:<=>?@\\\\^_`|~-A" Name.Variable
+'\n' Text
diff --git a/tests/snippets/whiley/test_whiley_operator.txt b/tests/snippets/whiley/test_whiley_operator.txt
new file mode 100644
index 0000000..50761db
--- /dev/null
+++ b/tests/snippets/whiley/test_whiley_operator.txt
@@ -0,0 +1,10 @@
+---input---
+123 ∀ x
+
+---tokens---
+'123' Literal.Number.Integer
+' ' Text
+'∀' Operator
+' ' Text
+'x' Name
+'\n' Text
diff --git a/tests/snippets/wren/lonely-paren.txt b/tests/snippets/wren/lonely-paren.txt
new file mode 100644
index 0000000..5236e60
--- /dev/null
+++ b/tests/snippets/wren/lonely-paren.txt
@@ -0,0 +1,10 @@
+---input---
+// This invalid input should terminate.
+)
+
+---tokens---
+'// This invalid input should terminate.' Comment.Single
+'\n' Text.Whitespace
+
+')' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/xml/multiline-comment-catastrophic-backtracking.txt b/tests/snippets/xml/multiline-comment-catastrophic-backtracking.txt
new file mode 100644
index 0000000..d9a8b7a
--- /dev/null
+++ b/tests/snippets/xml/multiline-comment-catastrophic-backtracking.txt
@@ -0,0 +1,56 @@
+---input---
+<!--
+this
+comment
+is
+never
+terminated
+...
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+...
+
+---tokens---
+'<' Error
+'!--' Text
+'\n' Text.Whitespace
+
+'this' Text
+'\n' Text.Whitespace
+
+'comment' Text
+'\n' Text.Whitespace
+
+'is' Text
+'\n' Text.Whitespace
+
+'never' Text
+'\n' Text.Whitespace
+
+'terminated' Text
+'\n' Text.Whitespace
+
+'...' Text
+'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n' Text.Whitespace
+
+'...' Text
+'\n' Text.Whitespace
diff --git a/tests/snippets/yaml/test_yaml.txt b/tests/snippets/yaml/test_yaml.txt
new file mode 100644
index 0000000..0dd3911
--- /dev/null
+++ b/tests/snippets/yaml/test_yaml.txt
@@ -0,0 +1,13 @@
+# Bug #1528: This previously parsed 'token # innocent' as a tag
+
+---input---
+here: token # innocent: comment
+
+---tokens---
+'here' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'token' Literal.Scalar.Plain
+' ' Text.Whitespace
+'# innocent: comment' Comment.Single
+'\n' Text.Whitespace
diff --git a/tests/snippets/yaml/test_yaml_colon_in_key.txt b/tests/snippets/yaml/test_yaml_colon_in_key.txt
new file mode 100644
index 0000000..9f3d313
--- /dev/null
+++ b/tests/snippets/yaml/test_yaml_colon_in_key.txt
@@ -0,0 +1,11 @@
+# Colon in the key name is accepted by the YAML specs too
+
+---input---
+foo:bar: value
+
+---tokens---
+'foo:bar' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'value' Literal.Scalar.Plain
+'\n' Text.Whitespace
diff --git a/tests/snippets/yaml/test_yaml_colon_in_key_double.txt b/tests/snippets/yaml/test_yaml_colon_in_key_double.txt
new file mode 100644
index 0000000..6f68b6d
--- /dev/null
+++ b/tests/snippets/yaml/test_yaml_colon_in_key_double.txt
@@ -0,0 +1,11 @@
+# Colons in the key name are accepted by the YAML specs too
+
+---input---
+foo::bar: value
+
+---tokens---
+'foo::bar' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'value' Literal.Scalar.Plain
+'\n' Text.Whitespace
diff --git a/tests/snippets/yaml/test_yaml_colon_in_key_start.txt b/tests/snippets/yaml/test_yaml_colon_in_key_start.txt
new file mode 100644
index 0000000..60d4e23
--- /dev/null
+++ b/tests/snippets/yaml/test_yaml_colon_in_key_start.txt
@@ -0,0 +1,11 @@
+# Colon at the beginning of the key name is accepted by the YAML specs too
+
+---input---
+:foo: value
+
+---tokens---
+':foo' Name.Tag
+':' Punctuation
+' ' Text.Whitespace
+'value' Literal.Scalar.Plain
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_float_value.txt b/tests/snippets/yang/test_float_value.txt
new file mode 100644
index 0000000..b49f479
--- /dev/null
+++ b/tests/snippets/yang/test_float_value.txt
@@ -0,0 +1,11 @@
+# Float value `1.1` should be explicitly highlighted
+
+---input---
+yang-version 1.1;
+
+---tokens---
+'yang-version' Keyword
+' ' Text.Whitespace
+'1.1' Literal.Number.Float
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_integer_value.txt b/tests/snippets/yang/test_integer_value.txt
new file mode 100644
index 0000000..149d19c
--- /dev/null
+++ b/tests/snippets/yang/test_integer_value.txt
@@ -0,0 +1,11 @@
+# Integer value `5` should be explicitly highlighted
+
+---input---
+value 5;
+
+---tokens---
+'value' Keyword
+' ' Text.Whitespace
+'5' Literal.Number.Integer
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_namespace_1.txt b/tests/snippets/yang/test_namespace_1.txt
new file mode 100644
index 0000000..1caf138
--- /dev/null
+++ b/tests/snippets/yang/test_namespace_1.txt
@@ -0,0 +1,11 @@
+# Namespace `urn:test:std:yang` should not be explicitly highlighted
+
+---input---
+namespace urn:test:std:yang;
+
+---tokens---
+'namespace' Keyword
+' ' Text.Whitespace
+'urn:test:std:yang' Name.Variable
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_namespace_2.txt b/tests/snippets/yang/test_namespace_2.txt
new file mode 100644
index 0000000..a245f7c
--- /dev/null
+++ b/tests/snippets/yang/test_namespace_2.txt
@@ -0,0 +1,13 @@
+# namespace-prefix `yang` should be explicitly highlighted
+
+---input---
+type yang:counter64;
+
+---tokens---
+'type' Keyword
+' ' Text.Whitespace
+'yang' Name.Namespace
+':' Punctuation
+'counter64' Name.Variable
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_revision_date.txt b/tests/snippets/yang/test_revision_date.txt
new file mode 100644
index 0000000..09ff5a6
--- /dev/null
+++ b/tests/snippets/yang/test_revision_date.txt
@@ -0,0 +1,11 @@
+# Revision-date `2020-03-08` should be explicitly highlighted
+
+---input---
+revision 2020-03-08{
+
+---tokens---
+'revision' Keyword
+' ' Text.Whitespace
+'2020-03-08' Name.Label
+'{' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/snippets/yang/test_string_value.txt b/tests/snippets/yang/test_string_value.txt
new file mode 100644
index 0000000..41dcae5
--- /dev/null
+++ b/tests/snippets/yang/test_string_value.txt
@@ -0,0 +1,11 @@
+# String value `"5"` should not be explicitly highlighted
+
+---input---
+value "5";
+
+---tokens---
+'value' Keyword
+' ' Text.Whitespace
+'"5"' Literal.String.Double
+';' Punctuation
+'\n' Text.Whitespace
diff --git a/tests/support/empty.py b/tests/support/empty.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/support/empty.py
diff --git a/tests/support/html_formatter.py b/tests/support/html_formatter.py
new file mode 100644
index 0000000..5f04fd5
--- /dev/null
+++ b/tests/support/html_formatter.py
@@ -0,0 +1,5 @@
+from pygments.formatters import HtmlFormatter
+
+
+class HtmlFormatterWrapper(HtmlFormatter):
+ name = 'HtmlWrapper'
diff --git a/tests/support/python_lexer.py b/tests/support/python_lexer.py
new file mode 100644
index 0000000..78d9c4a
--- /dev/null
+++ b/tests/support/python_lexer.py
@@ -0,0 +1,11 @@
+# pygments.lexers.python (as CustomLexer) for test_cmdline.py
+
+from pygments.lexers import PythonLexer
+
+
+class CustomLexer(PythonLexer):
+ name = 'PythonLexerWrapper'
+
+
+class LexerWrapper(CustomLexer):
+ name = 'PythonLexerWrapperWrapper'
diff --git a/tests/support/structural_diff.py b/tests/support/structural_diff.py
new file mode 100644
index 0000000..cea27d1
--- /dev/null
+++ b/tests/support/structural_diff.py
@@ -0,0 +1,37 @@
+import html.parser
+
+
+class Parser(html.parser.HTMLParser):
+ def __init__(self):
+ super().__init__()
+ self._stream = []
+
+ def handle_starttag(self, tag, attrs):
+ attrs = sorted(attrs, key=lambda x: x[0])
+ attrs = '|'.join([k[0] + ':' + k[1] for k in attrs])
+ self._stream.append(('<', tag, attrs))
+
+ def handle_endtag(self, tag):
+ self._stream.append(('>', tag, ''))
+
+ def handle_data(self, data):
+ self._stream.append(('_', data, ''))
+
+ @property
+ def stream(self):
+ return self._stream
+
+
+def _serialize(t):
+ parser = Parser()
+ parser.feed(t)
+ return parser.stream
+
+
+def structural_diff(a, b):
+ """Check if there is a structural difference between two HTML files."""
+ a_s = _serialize(a)
+ b_s = _serialize(b)
+
+ for e, f in zip(a_s, b_s):
+ assert e == f, f'Expected: {e}, found: {f}'
diff --git a/tests/support/tags b/tests/support/tags
new file mode 100644
index 0000000..193779f
--- /dev/null
+++ b/tests/support/tags
@@ -0,0 +1,36 @@
+!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/
+!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/
+!_TAG_PROGRAM_AUTHOR Darren Hiebert /dhiebert@users.sourceforge.net/
+!_TAG_PROGRAM_NAME Exuberant Ctags //
+!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/
+!_TAG_PROGRAM_VERSION 5.8 //
+HtmlFormatter test_html_formatter.py 19;" i
+HtmlFormatterTest test_html_formatter.py 34;" c
+NullFormatter test_html_formatter.py 19;" i
+PythonLexer test_html_formatter.py 18;" i
+StringIO test_html_formatter.py 13;" i
+dirname test_html_formatter.py 16;" i
+escape_html test_html_formatter.py 20;" i
+fp test_html_formatter.py 27;" v
+inspect test_html_formatter.py 15;" i
+isfile test_html_formatter.py 16;" i
+join test_html_formatter.py 16;" i
+os test_html_formatter.py 10;" i
+re test_html_formatter.py 11;" i
+subprocess test_html_formatter.py 125;" i
+support test_html_formatter.py 23;" i
+tempfile test_html_formatter.py 14;" i
+test_all_options test_html_formatter.py 72;" m class:HtmlFormatterTest
+test_correct_output test_html_formatter.py 35;" m class:HtmlFormatterTest
+test_ctags test_html_formatter.py 165;" m class:HtmlFormatterTest
+test_external_css test_html_formatter.py 48;" m class:HtmlFormatterTest
+test_get_style_defs test_html_formatter.py 141;" m class:HtmlFormatterTest
+test_lineanchors test_html_formatter.py 98;" m class:HtmlFormatterTest
+test_lineanchors_with_startnum test_html_formatter.py 106;" m class:HtmlFormatterTest
+test_linenos test_html_formatter.py 82;" m class:HtmlFormatterTest
+test_linenos_with_startnum test_html_formatter.py 90;" m class:HtmlFormatterTest
+test_unicode_options test_html_formatter.py 155;" m class:HtmlFormatterTest
+test_valid_output test_html_formatter.py 114;" m class:HtmlFormatterTest
+tokensource test_html_formatter.py 29;" v
+uni_open test_html_formatter.py 21;" i
+unittest test_html_formatter.py 12;" i
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
new file mode 100644
index 0000000..62ea489
--- /dev/null
+++ b/tests/test_basic_api.py
@@ -0,0 +1,351 @@
+"""
+ Pygments basic API tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import random
+from io import StringIO, BytesIO
+from os import path
+
+import pytest
+
+from pygments import lexers, formatters, lex, format
+from pygments.token import _TokenType, Text
+from pygments.lexer import RegexLexer
+from pygments.formatter import Formatter
+from pygments.formatters.img import FontNotFound
+from pygments.util import ClassNotFound
+
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_basic_api.py')
+
+test_content = [chr(i) for i in range(33, 128)] * 5
+random.shuffle(test_content)
+test_content = ''.join(test_content) + '\n'
+
+
+@pytest.mark.parametrize('name', lexers.LEXERS)
+def test_lexer_instantiate_all(name):
+ # instantiate every lexer, to see if the token type defs are correct
+ getattr(lexers, name)
+
+
+@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False))
+def test_lexer_classes(cls):
+ # test that every lexer class has the correct public API
+ assert type(cls.name) is str
+ for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
+ assert hasattr(cls, attr)
+ assert type(getattr(cls, attr)) is list, \
+ "%s: %s attribute wrong" % (cls, attr)
+ result = cls.analyse_text("abc")
+ assert isinstance(result, float) and 0.0 <= result <= 1.0
+ result = cls.analyse_text(".abc")
+ assert isinstance(result, float) and 0.0 <= result <= 1.0
+
+ assert all(al.lower() == al for al in cls.aliases)
+
+ if issubclass(cls, RegexLexer):
+ inst = cls(opt1="val1", opt2="val2")
+ if not hasattr(cls, '_tokens'):
+ # if there's no "_tokens", the lexer has to be one with
+ # multiple tokendef variants
+ assert cls.token_variants
+ for variant in cls.tokens:
+ assert 'root' in cls.tokens[variant]
+ else:
+ assert 'root' in cls._tokens, \
+ '%s has no root state' % cls
+
+
+@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False))
+def test_random_input(cls):
+ inst = cls()
+ try:
+ tokens = list(inst.get_tokens(test_content))
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt(
+ 'interrupted %s.get_tokens(): test_content=%r' %
+ (cls.__name__, test_content))
+ txt = ""
+ for token in tokens:
+ assert isinstance(token, tuple)
+ assert isinstance(token[0], _TokenType)
+ assert isinstance(token[1], str)
+ txt += token[1]
+ assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
+ (cls.name, test_content, txt)
+
+
+@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False))
+def test_lexer_options(cls):
+ if cls.__name__ == 'RawTokenLexer':
+ # this one is special
+ return
+
+ # test that the basic options work
+ def ensure(tokens, output):
+ concatenated = ''.join(token[1] for token in tokens)
+ assert concatenated == output, \
+ '%s: %r != %r' % (cls, concatenated, output)
+
+ inst = cls(stripnl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb\n')
+ ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
+ inst = cls(stripall=True)
+ ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')
+ # some lexers require full lines in input
+ if ('ConsoleLexer' not in cls.__name__ and
+ 'SessionLexer' not in cls.__name__ and
+ not cls.__name__.startswith('Literate') and
+ cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')):
+ inst = cls(ensurenl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb')
+ inst = cls(ensurenl=False, stripall=True)
+ ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')
+
+
+def test_get_lexers():
+ # test that the lexers functions work
+ for func, args in [(lexers.get_lexer_by_name, ("python",)),
+ (lexers.get_lexer_for_filename, ("test.py",)),
+ (lexers.get_lexer_for_mimetype, ("text/x-python",)),
+ (lexers.guess_lexer, ("#!/usr/bin/python3 -O\nprint",)),
+ (lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
+ ]:
+ x = func(opt='val', *args)
+ assert isinstance(x, lexers.PythonLexer)
+ assert x.options["opt"] == "val"
+
+ for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items():
+ assert cls == lexers.find_lexer_class(lname).__name__
+
+ for alias in aliases:
+ assert cls == lexers.get_lexer_by_name(alias).__class__.__name__
+
+ for mimetype in mimetypes:
+ assert cls == lexers.get_lexer_for_mimetype(mimetype).__class__.__name__
+
+ try:
+ lexers.get_lexer_by_name(None)
+ except ClassNotFound:
+ pass
+ else:
+ raise Exception
+
+
+@pytest.mark.parametrize('cls', [getattr(formatters, name)
+ for name in formatters.FORMATTERS])
+def test_formatter_public_api(cls):
+ # test that every formatter class has the correct public API
+ ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
+ string_out = StringIO()
+ bytes_out = BytesIO()
+
+ info = formatters.FORMATTERS[cls.__name__]
+ assert len(info) == 5
+ assert info[1], "missing formatter name"
+ assert info[2], "missing formatter aliases"
+ assert info[4], "missing formatter docstring"
+
+ try:
+ inst = cls(opt1="val1")
+ except (ImportError, FontNotFound) as e:
+ pytest.skip(str(e))
+
+ try:
+ inst.get_style_defs()
+ except NotImplementedError:
+ # may be raised by formatters for which it doesn't make sense
+ pass
+
+ if cls.unicodeoutput:
+ inst.format(ts, string_out)
+ else:
+ inst.format(ts, bytes_out)
+
+
+def test_formatter_encodings():
+ from pygments.formatters import HtmlFormatter
+
+ # unicode output
+ fmt = HtmlFormatter()
+ tokens = [(Text, "ä")]
+ out = format(tokens, fmt)
+ assert type(out) is str
+ assert "ä" in out
+
+ # encoding option
+ fmt = HtmlFormatter(encoding="latin1")
+ tokens = [(Text, "ä")]
+ assert "ä".encode("latin1") in format(tokens, fmt)
+
+ # encoding and outencoding option
+ fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
+ tokens = [(Text, "ä")]
+ assert "ä".encode() in format(tokens, fmt)
+
+
+@pytest.mark.parametrize('cls', [getattr(formatters, name)
+ for name in formatters.FORMATTERS])
+def test_formatter_unicode_handling(cls):
+ # test that the formatter supports encoding and Unicode
+ tokens = list(lexers.PythonLexer(encoding='utf-8').
+ get_tokens("def f(): 'ä'"))
+
+ try:
+ inst = cls(encoding=None)
+ except (ImportError, FontNotFound) as e:
+ # some dependency or font not installed
+ pytest.skip(str(e))
+
+ if cls.name != 'Raw tokens':
+ out = format(tokens, inst)
+ if cls.unicodeoutput:
+ assert type(out) is str, '%s: %r' % (cls, out)
+
+ inst = cls(encoding='utf-8')
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (cls, out)
+ # Cannot test for encoding, since formatters may have to escape
+ # non-ASCII characters.
+ else:
+ inst = cls()
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (cls, out)
+
+
+def test_get_formatters():
+ # test that the formatters functions work
+ x = formatters.get_formatter_by_name("html", opt="val")
+ assert isinstance(x, formatters.HtmlFormatter)
+ assert x.options["opt"] == "val"
+
+ x = formatters.get_formatter_for_filename("a.html", opt="val")
+ assert isinstance(x, formatters.HtmlFormatter)
+ assert x.options["opt"] == "val"
+
+
+def test_styles():
+ # minimal style test
+ from pygments.formatters import HtmlFormatter
+ HtmlFormatter(style="pastie")
+
+
+def test_bare_class_handler():
+ from pygments.formatters import HtmlFormatter
+ from pygments.lexers import PythonLexer
+ try:
+ lex('test\n', PythonLexer)
+ except TypeError as e:
+ assert 'lex() argument must be a lexer instance' in str(e)
+ else:
+ assert False, 'nothing raised'
+ try:
+ format([], HtmlFormatter)
+ except TypeError as e:
+ assert 'format() argument must be a formatter instance' in str(e)
+ else:
+ assert False, 'nothing raised'
+
+ # These cases should not trigger this heuristic.
+ class BuggyLexer(RegexLexer):
+ def get_tokens(self, text, extra_argument):
+ pass
+ tokens = {'root': []}
+ try:
+ list(lex('dummy', BuggyLexer()))
+ except TypeError as e:
+ assert 'lex() argument must be a lexer instance' not in str(e)
+ else:
+ assert False, 'no error raised by buggy lexer?'
+
+ class BuggyFormatter(Formatter):
+ def format(self, tokensource, outfile, extra_argument):
+ pass
+ try:
+ format([], BuggyFormatter())
+ except TypeError as e:
+ assert 'format() argument must be a formatter instance' not in str(e)
+ else:
+ assert False, 'no error raised by buggy formatter?'
+
+class TestFilters:
+
+ def test_basic(self):
+ filters_args = [
+ ('whitespace', {'spaces': True, 'tabs': True, 'newlines': True}),
+ ('whitespace', {'wstokentype': False, 'spaces': True}),
+ ('highlight', {'names': ['isinstance', 'lexers', 'x']}),
+ ('codetagify', {'codetags': 'API'}),
+ ('keywordcase', {'case': 'capitalize'}),
+ ('raiseonerror', {}),
+ ('gobble', {'n': 4}),
+ ('tokenmerge', {}),
+ ('symbols', {'lang': 'isabelle'}),
+ ]
+ for x, args in filters_args:
+ lx = lexers.PythonLexer()
+ lx.add_filter(x, **args)
+ # We don't read as binary and decode, but instead read as text, as
+ # we need consistent line endings. Otherwise we'll get \r\n on
+ # Windows
+ with open(TESTFILE, encoding='utf-8') as fp:
+ text = fp.read()
+ tokens = list(lx.get_tokens(text))
+ assert all(isinstance(t[1], str) for t in tokens), \
+ '%s filter did not return Unicode' % x
+ roundtext = ''.join([t[1] for t in tokens])
+ if x not in ('whitespace', 'keywordcase', 'gobble'):
+ # these filters change the text
+ assert roundtext == text, \
+ "lexer roundtrip with %s filter failed" % x
+
+ def test_raiseonerror(self):
+ lx = lexers.PythonLexer()
+ lx.add_filter('raiseonerror', excclass=RuntimeError)
+ assert pytest.raises(RuntimeError, list, lx.get_tokens('$'))
+
+ def test_whitespace(self):
+ lx = lexers.PythonLexer()
+ lx.add_filter('whitespace', spaces='%')
+ with open(TESTFILE, 'rb') as fp:
+ text = fp.read().decode('utf-8')
+ lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
+ assert ' ' not in lxtext
+
+ def test_keywordcase(self):
+ lx = lexers.PythonLexer()
+ lx.add_filter('keywordcase', case='capitalize')
+ with open(TESTFILE, 'rb') as fp:
+ text = fp.read().decode('utf-8')
+ lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
+ assert 'Def' in lxtext and 'Class' in lxtext
+
+ def test_codetag(self):
+ lx = lexers.PythonLexer()
+ lx.add_filter('codetagify')
+ text = '# BUG: text'
+ tokens = list(lx.get_tokens(text))
+ assert '# ' == tokens[0][1]
+ assert 'BUG' == tokens[1][1]
+
+ def test_codetag_boundary(self):
+ # ticket #368
+ lx = lexers.PythonLexer()
+ lx.add_filter('codetagify')
+ text = '# DEBUG: text'
+ tokens = list(lx.get_tokens(text))
+ assert '# DEBUG: text' == tokens[0][1]
+
+ def test_symbols(self):
+ lx = lexers.IsabelleLexer()
+ lx.add_filter('symbols')
+ text = 'lemma "A \\<Longrightarrow> B"'
+ tokens = list(lx.get_tokens(text))
+ assert 'lemma' == tokens[0][1]
+ assert 'A ' == tokens[3][1]
+ assert '\U000027f9' == tokens[4][1]
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
new file mode 100644
index 0000000..c05fd01
--- /dev/null
+++ b/tests/test_cmdline.py
@@ -0,0 +1,324 @@
+"""
+ Command line test
+ ~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import io
+import os
+import re
+import sys
+import tempfile
+from io import BytesIO
+from os import path
+
+import pytest
+from pytest import raises
+
+from pygments import cmdline, highlight
+
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_cmdline.py')
+
+TESTCODE = '''\
+def func(args):
+ pass
+'''
+
+
+def _decode_output(text):
+ try:
+ return text.decode('utf-8')
+ except UnicodeEncodeError: # implicit encode on Python 2 with data loss
+ return text
+
+
+def run_cmdline(*args, **kwds):
+ saved_stdin = sys.stdin
+ saved_stdout = sys.stdout
+ saved_stderr = sys.stderr
+ stdin_buffer = BytesIO()
+ stdout_buffer = BytesIO()
+ stderr_buffer = BytesIO()
+ new_stdin = sys.stdin = io.TextIOWrapper(stdin_buffer, 'utf-8')
+ new_stdout = sys.stdout = io.TextIOWrapper(stdout_buffer, 'utf-8')
+ new_stderr = sys.stderr = io.TextIOWrapper(stderr_buffer, 'utf-8')
+ new_stdin.write(kwds.get('stdin', ''))
+ new_stdin.seek(0, 0)
+ try:
+ ret = cmdline.main(['pygmentize'] + list(args))
+ finally:
+ sys.stdin = saved_stdin
+ sys.stdout = saved_stdout
+ sys.stderr = saved_stderr
+ new_stdout.flush()
+ new_stderr.flush()
+ out, err = stdout_buffer.getvalue(), \
+ stderr_buffer.getvalue()
+ return (ret, _decode_output(out), _decode_output(err))
+
+
+def check_success(*cmdline, **kwds):
+ code, out, err = run_cmdline(*cmdline, **kwds)
+ assert code == 0
+ assert err == ''
+ return out
+
+
+def check_failure(*cmdline, **kwds):
+ expected_code = kwds.pop('code', 1)
+ try:
+ code, out, err = run_cmdline(*cmdline, **kwds)
+ except SystemExit as err:
+ assert err.args[0] == expected_code
+ else:
+ assert code == expected_code
+ assert out == ''
+ return err
+
+
+def test_normal():
+ # test that cmdline gives the same output as library api
+ from pygments.lexers import PythonLexer
+ from pygments.formatters import HtmlFormatter
+ filename = TESTFILE
+ with open(filename, 'rb') as fp:
+ code = fp.read()
+
+ output = highlight(code, PythonLexer(), HtmlFormatter())
+
+ o = check_success('-lpython', '-fhtml', filename)
+ assert o == output
+
+
+def test_stdin():
+ o = check_success('-lpython', '-fhtml', stdin=TESTCODE)
+ o = re.sub('<[^>]*>', '', o)
+ # rstrip is necessary since HTML inserts a \n after the last </div>
+ assert o.rstrip() == TESTCODE.rstrip()
+
+ # guess if no lexer given
+ o = check_success('-fhtml', stdin=TESTCODE)
+ o = re.sub('<[^>]*>', '', o)
+ # rstrip is necessary since HTML inserts a \n after the last </div>
+ assert o.rstrip() == TESTCODE.rstrip()
+
+
+def test_outfile():
+ # test that output file works with and without encoding
+ fd, name = tempfile.mkstemp()
+ os.close(fd)
+ for opts in [['-fhtml', '-o', name, TESTFILE],
+ ['-flatex', '-o', name, TESTFILE],
+ ['-fhtml', '-o', name, '-O', 'encoding=utf-8', TESTFILE]]:
+ try:
+ check_success(*opts)
+ finally:
+ os.unlink(name)
+
+
+def test_load_from_file():
+ lexer_file = os.path.join(TESTDIR, 'support', 'python_lexer.py')
+ formatter_file = os.path.join(TESTDIR, 'support', 'html_formatter.py')
+
+ # By default, use CustomLexer
+ o = check_success('-l', lexer_file, '-f', 'html', '-x', stdin=TESTCODE)
+ o = re.sub('<[^>]*>', '', o)
+ # rstrip is necessary since HTML inserts a \n after the last </div>
+ assert o.rstrip() == TESTCODE.rstrip()
+
+ # If user specifies a name, use it
+ o = check_success('-f', 'html', '-x', '-l',
+ lexer_file + ':LexerWrapper', stdin=TESTCODE)
+ o = re.sub('<[^>]*>', '', o)
+ # rstrip is necessary since HTML inserts a \n after the last </div>
+ assert o.rstrip() == TESTCODE.rstrip()
+
+ # Should also work for formatters
+ o = check_success('-lpython', '-f',
+ formatter_file + ':HtmlFormatterWrapper',
+ '-x', stdin=TESTCODE)
+ o = re.sub('<[^>]*>', '', o)
+ # rstrip is necessary since HTML inserts a \n after the last </div>
+ assert o.rstrip() == TESTCODE.rstrip()
+
+
+def test_stream_opt():
+ o = check_success('-lpython', '-s', '-fterminal', stdin=TESTCODE)
+ o = re.sub(r'\x1b\[.*?m', '', o)
+ assert o.replace('\r\n', '\n') == TESTCODE
+
+
+def test_h_opt():
+ o = check_success('-h')
+ assert 'usage:' in o
+
+
+def test_L_opt():
+ o = check_success('-L')
+ assert 'Lexers' in o and 'Formatters' in o and 'Filters' in o and 'Styles' in o
+ o = check_success('-L', 'lexer')
+ assert 'Lexers' in o and 'Formatters' not in o
+ check_success('-L', 'lexers')
+
+
+def test_O_opt():
+ filename = TESTFILE
+ o = check_success('-Ofull=1,linenos=true,foo=bar', '-fhtml', filename)
+ assert '<html' in o
+ assert 'class="linenos"' in o
+
+ # "foobar" is invalid for a bool option
+ e = check_failure('-Ostripnl=foobar', TESTFILE)
+ assert 'Error: Invalid value' in e
+ e = check_failure('-Ostripnl=foobar', '-lpy')
+ assert 'Error: Invalid value' in e
+
+
+def test_P_opt():
+ filename = TESTFILE
+ o = check_success('-Pfull', '-Ptitle=foo, bar=baz=,', '-fhtml', filename)
+ assert '<title>foo, bar=baz=,</title>' in o
+
+
+def test_F_opt():
+ filename = TESTFILE
+ o = check_success('-Fhighlight:tokentype=Name.Blubb,'
+ 'names=TESTFILE filename', '-fhtml', filename)
+ assert '<span class="n n-Blubb' in o
+
+
+def test_H_opt():
+ o = check_success('-H', 'formatter', 'html')
+ assert 'HTML' in o
+ o = check_success('-H', 'lexer', 'python')
+ assert 'Python' in o
+ o = check_success('-H', 'filter', 'raiseonerror')
+ assert 'raiseonerror' in o
+ e = check_failure('-H', 'lexer', 'foobar')
+ assert 'not found' in e
+
+
+def test_S_opt():
+ o = check_success('-S', 'default', '-f', 'html', '-O', 'linenos=1')
+ lines = o.splitlines()
+ for line in lines[5:]:
+ # every line is for a token class, except for the first 5 lines,
+ # which define styles for `pre` and line numbers
+ parts = line.split()
+ assert parts[0].startswith('.')
+ assert parts[1] == '{'
+ if parts[0] != '.hll':
+ assert parts[-4] == '}'
+ assert parts[-3] == '/*'
+ assert parts[-1] == '*/'
+ check_failure('-S', 'default', '-f', 'foobar')
+
+
+def test_N_opt():
+ o = check_success('-N', 'test.py')
+ assert 'python' == o.strip()
+ o = check_success('-N', 'test.unknown')
+ assert 'text' == o.strip()
+
+
+def test_C_opt():
+ o = check_success('-C', stdin='#!python3\n')
+ assert 'python' == o.strip()
+ o = check_success('-C', stdin='x')
+ assert 'text' == o.strip()
+
+
+@pytest.mark.parametrize('opts', [
+ ('-X',),
+ ('-L', '-lpy'),
+ ('-L', '-fhtml'),
+ ('-L', '-Ox'),
+ ('-S', 'default', '-l', 'py', '-f', 'html'),
+ ('-S', 'default'),
+ ('-a', 'arg'),
+ ('-H',),
+ (TESTFILE, TESTFILE),
+ ('-H', 'formatter'),
+ ('-H', 'foo', 'bar'),
+ ('-s',),
+ ('-s', TESTFILE),
+])
+def test_invalid_opts(opts):
+ check_failure(*opts, code=2)
+
+
+def test_errors():
+ # input file not found
+ e = check_failure('-lpython', 'nonexistent.py')
+ assert 'Error: cannot read infile' in e
+ assert 'nonexistent.py' in e
+
+ # lexer not found
+ e = check_failure('-lfooo', TESTFILE)
+ assert 'Error: no lexer for alias' in e
+
+ # cannot load .py file without load_from_file flag
+ e = check_failure('-l', 'nonexistent.py', TESTFILE)
+ assert 'Error: no lexer for alias' in e
+
+ # lexer file is missing/unreadable
+ e = check_failure('-l', 'nonexistent.py', '-x', TESTFILE)
+ assert 'Error: cannot read' in e
+
+ # lexer file is malformed
+ e = check_failure('-l', path.join(TESTDIR, 'support', 'empty.py'),
+ '-x', TESTFILE)
+ assert 'Error: no valid CustomLexer class found' in e
+
+ # formatter not found
+ e = check_failure('-lpython', '-ffoo', TESTFILE)
+ assert 'Error: no formatter found for name' in e
+
+ # formatter for outfile not found
+ e = check_failure('-ofoo.foo', TESTFILE)
+ assert 'Error: no formatter found for file name' in e
+
+ # cannot load .py file without load_from_file flag
+ e = check_failure('-f', 'nonexistent.py', TESTFILE)
+ assert 'Error: no formatter found for name' in e
+
+ # formatter file is missing/unreadable
+ e = check_failure('-f', 'nonexistent.py', '-x', TESTFILE)
+ assert 'Error: cannot read' in e
+
+ # formatter file is malformed
+ e = check_failure('-f', path.join(TESTDIR, 'support', 'empty.py'),
+ '-x', TESTFILE)
+ assert 'Error: no valid CustomFormatter class found' in e
+
+ # output file not writable
+ e = check_failure('-o', os.path.join('nonexistent', 'dir', 'out.html'),
+ '-lpython', TESTFILE)
+ assert 'Error: cannot open outfile' in e
+ assert 'out.html' in e
+
+ # unknown filter
+ e = check_failure('-F', 'foo', TESTFILE)
+ assert 'Error: filter \'foo\' not found' in e
+
+
+def test_exception():
+ cmdline.highlight = None # override callable to provoke TypeError
+ try:
+ # unexpected exception while highlighting
+ e = check_failure('-lpython', TESTFILE)
+ assert '*** Error while highlighting:' in e
+ assert 'TypeError' in e
+
+ # same with -v: should reraise the exception
+ assert raises(Exception, check_failure, '-lpython', '-v', TESTFILE)
+ finally:
+ cmdline.highlight = highlight
+
+
+def test_parse_opts():
+ assert cmdline._parse_options([' ', 'keyonly,key = value ']) == \
+ {'keyonly': True, 'key': 'value'}
diff --git a/tests/test_coffeescript.py b/tests/test_coffeescript.py
new file mode 100644
index 0000000..3d16727
--- /dev/null
+++ b/tests/test_coffeescript.py
@@ -0,0 +1,52 @@
+"""
+ CoffeeScript tests
+ ~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers import CoffeeScriptLexer
+from pygments.token import Token
+
+COFFEE_SLASH_GOLDEN = [
+ # input_str, slashes_are_regex_here
+ (r'/\\/', True),
+ (r'/\\/i', True),
+ (r'/\//', True),
+ (r'/(\s)/', True),
+ ('/a{2,8}/', True),
+ ('/b*c?d+/', True),
+ ('/(capture-match)/', True),
+ ('/(?:do-not-capture-match)/', True),
+ ('/this|or|that/', True),
+ ('/[char-set]/', True),
+ ('/[^neg-char_st]/', True),
+ ('/^.*$/', True),
+ (r'/\n(\f)\0\1\d\b\cm\u1234/', True),
+ (r'/^.?([^/\\\n\w]*)a\1+$/.something(or_other) # something more complex', True),
+ ("foo = (str) ->\n /'|\"/.test str", True),
+ ('a = a / b / c', False),
+ ('a = a/b/c', False),
+ ('a = a/b/ c', False),
+ ('a = a /b/c', False),
+ ('a = 1 + /d/.test(a)', True),
+]
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield CoffeeScriptLexer()
+
+
+@pytest.mark.parametrize('golden', COFFEE_SLASH_GOLDEN)
+def test_coffee_slashes(lexer, golden):
+ input_str, slashes_are_regex_here = golden
+ output = list(lexer.get_tokens(input_str))
+ print(output)
+ for t, s in output:
+ if '/' in s:
+ is_regex = t is Token.String.Regex
+ assert is_regex == slashes_are_regex_here, (t, s)
diff --git a/tests/test_crystal.py b/tests/test_crystal.py
new file mode 100644
index 0000000..962d9e5
--- /dev/null
+++ b/tests/test_crystal.py
@@ -0,0 +1,80 @@
+"""
+ Basic CrystalLexer Test
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.token import Text, String, Number, Punctuation, Error, Whitespace
+from pygments.lexers import CrystalLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield CrystalLexer()
+
+
+def test_numbers(lexer):
+ for kind, testset in [
+ (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'),
+ (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'),
+ (Number.Bin, '0b1001_0110 0b0u8'),
+ (Number.Oct, '0o17 0o7_i32'),
+ (Number.Hex, '0xdeadBEEF'),
+ ]:
+ for fragment in testset.split():
+ assert list(lexer.get_tokens(fragment + '\n')) == \
+ [(kind, fragment), (Whitespace, '\n')]
+
+ for fragment in '01 0b2 0x129g2 0o12358'.split():
+ assert next(lexer.get_tokens(fragment + '\n'))[0] == Error
+
+
+def test_symbols(lexer):
+ for fragment in [':sym_bol', ':\u3042', ':question?']:
+ assert list(lexer.get_tokens(fragment + '\n')) == \
+ [(String.Symbol, fragment), (Whitespace, '\n')]
+
+ fragment = ':"sym bol"\n'
+ tokens = [
+ (String.Symbol, ':"'),
+ (String.Symbol, 'sym bol'),
+ (String.Symbol, '"'),
+ (Whitespace, '\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_chars(lexer):
+ for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]:
+ assert list(lexer.get_tokens(fragment + '\n')) == \
+ [(String.Char, fragment), (Whitespace, '\n')]
+ assert next(lexer.get_tokens("'abc'"))[0] == Error
+
+
+def test_string_escapes(lexer):
+ for body in ['\\n', '\\a', '\\xff', '\\u1234', '\\000', '\\u{0}', '\\u{10AfF9}']:
+ fragment = '"a' + body + 'z"\n'
+ assert list(lexer.get_tokens(fragment)) == [
+ (String.Double, '"'),
+ (String.Double, 'a'),
+ (String.Escape, body),
+ (String.Double, 'z'),
+ (String.Double, '"'),
+ (Whitespace, '\n'),
+ ]
+
+
+def test_empty_percent_strings(lexer):
+ for body in ['%()', '%[]', '%{}', '%<>', '%||']:
+ fragment = '(' + body + ')\n'
+ assert list(lexer.get_tokens(fragment)) == [
+ (Punctuation, '('),
+ (String.Other, body[:-1]),
+ (String.Other, body[-1]),
+ (Punctuation, ')'),
+ (Whitespace, '\n'),
+ ]
diff --git a/tests/test_data.py b/tests/test_data.py
new file mode 100644
index 0000000..719f8bf
--- /dev/null
+++ b/tests/test_data.py
@@ -0,0 +1,285 @@
+"""
+ Data Tests
+ ~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import time
+
+import pytest
+
+from pygments.lexers.data import JsonLexer, JsonBareObjectLexer, JsonLdLexer
+from pygments.token import Comment, Error, Token, Punctuation, Number, String, \
+ Keyword, Name, Whitespace
+
+
+@pytest.fixture(scope='module')
+def lexer_json():
+ yield JsonLexer()
+
+
+@pytest.fixture(scope='module')
+def lexer_bare():
+ yield JsonBareObjectLexer()
+
+
+@pytest.fixture(scope='module')
+def lexer_json_ld():
+ yield JsonLdLexer()
+
+
+@pytest.mark.parametrize(
+ 'text, expected_token_types',
+ (
+ # Integers
+ ('0', (Number.Integer,)),
+ ('-1', (Number.Integer,)),
+ ('1234567890', (Number.Integer,)),
+ ('-1234567890', (Number.Integer,)),
+
+ # Floats, including scientific notation
+ ('123456789.0123456789', (Number.Float,)),
+ ('-123456789.0123456789', (Number.Float,)),
+ ('1e10', (Number.Float,)),
+ ('-1E10', (Number.Float,)),
+ ('1e-10', (Number.Float,)),
+ ('-1E+10', (Number.Float,)),
+ ('1.0e10', (Number.Float,)),
+ ('-1.0E10', (Number.Float,)),
+ ('1.0e-10', (Number.Float,)),
+ ('-1.0E+10', (Number.Float,)),
+
+ # Strings (escapes are tested elsewhere)
+ ('""', (String.Double,)),
+ ('"abc"', (String.Double,)),
+ ('"ひらがな"', (String.Double,)),
+ ('"123"', (String.Double,)),
+ ('"[]"', (String.Double,)),
+ ('"{}"', (String.Double,)),
+ ('"true"', (String.Double,)),
+ ('"false"', (String.Double,)),
+ ('"null"', (String.Double,)),
+ ('":,"', (String.Double,)),
+
+ # Constants
+ ('true', (Keyword.Constant, )),
+ ('false', (Keyword.Constant, )),
+ ('null', (Keyword.Constant, )),
+
+ # Arrays
+ ('[]', (Punctuation,)),
+ ('["a", "b"]', (Punctuation, String.Double, Punctuation,
+ Whitespace, String.Double, Punctuation)),
+
+ # Objects
+ ('{}', (Punctuation,)),
+ ('{"a": "b"}', (Punctuation, Name.Tag, Punctuation,
+ Whitespace, String.Double, Punctuation)),
+ )
+)
+@pytest.mark.parametrize('end', ('', '\n'))
+def test_json_literals_positive_match(lexer_json, text, expected_token_types, end):
+ """Validate that syntactically-correct JSON literals are parsed correctly."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed(text + end))
+ assert len(tokens) == len(expected_token_types) + bool(end)
+ assert all(token[1] is expected_token
+ for token, expected_token in zip(tokens, expected_token_types))
+ assert ''.join(token[2] for token in tokens) == text + end
+
+
+@pytest.mark.parametrize(
+ 'text, expected',
+ (
+ ('\u0020', Whitespace), # space
+ ('\u000a', Whitespace), # newline
+ ('\u000d', Whitespace), # carriage return
+ ('\u0009', Whitespace), # tab
+ )
+)
+def test_json_whitespace_positive_match(lexer_json, text, expected):
+ """Validate that whitespace is parsed correctly."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert tokens == [(0, expected, text)]
+
+ # Expand the whitespace and verify parsing again.
+ tokens = list(lexer_json.get_tokens_unprocessed(text * 2 + ' '))
+ assert tokens == [(0, expected, text * 2 + ' ')]
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
+ 'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
+ )
+)
+def test_json_object_key_escapes_positive_match(lexer_json, text):
+ """Validate that escape sequences in JSON object keys are parsed correctly."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed('{"\\%s": 1}' % text))
+ assert len(tokens) == 6
+ assert tokens[1][1] is Name.Tag
+ assert tokens[1][2] == '"\\%s"' % text
+
+
+@pytest.mark.parametrize('text', ('uz', 'u1z', 'u12z', 'u123z'))
+def test_json_string_unicode_escapes_negative_match(lexer_json, text):
+ """Validate that if unicode escape sequences end abruptly there's no problem."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed('"\\%s"' % text))
+ assert len(tokens) == 1
+ assert tokens[0] == (0, String.Double, '"\\%s"' % text)
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
+ 'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
+ )
+)
+def test_json_string_escapes_positive_match(lexer_json, text):
+ """Validate that escape sequences in JSON string values are parsed correctly."""
+
+ text = '"\\%s"' % text
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert len(tokens) == 1
+ assert tokens[0][1] is String.Double
+ assert tokens[0][2] == text
+
+
+@pytest.mark.parametrize('text', ('+\n', '0\n', '""0\n', 'a\nb\n', '""/-'))
+def test_json_round_trip_errors(lexer_json, text):
+ """Validate that past round-trip errors never crop up again."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert ''.join(t[2] for t in tokens) == text
+
+
+def test_json_comments_single_line_positive_matches(lexer_json):
+ """Verify that single-line comments are tokenized correctly."""
+
+ text = '{"a//b"//C1\n:123/////C2\n}\n// // C3'
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert tokens[2] == (7, Comment.Single, "//C1")
+ assert tokens[6] == (16, Comment.Single, "/////C2")
+ assert tokens[10] == (26, Comment.Single, "// // C3")
+
+ comment_count = sum(1 for token in tokens if token[1] == Comment or token[1].parent == Comment)
+ assert comment_count == 3
+
+ parsed_text = ''.join(token[2] for token in tokens)
+ assert parsed_text == text, 'Input and output texts must match!'
+
+
+def test_json_comments_multiline_positive_matches(lexer_json):
+ """Verify that multiline comments are tokenized correctly."""
+
+ text = '/** / **/{"a /**/ b"/* \n */:123}'
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert tokens[0] == (0, Comment.Multiline, "/** / **/")
+ assert tokens[3] == (20, Comment.Multiline, "/* \n */")
+
+ comment_count = sum(1 for token in tokens if token[1] == Comment or token[1].parent == Comment)
+ assert comment_count == 2
+
+ parsed_text = ''.join(token[2] for token in tokens)
+ assert parsed_text == text, 'Input and output texts must match!'
+
+
+@pytest.mark.parametrize(
+ "text, expected",
+ (
+ # Unfinished comment openers
+ ('/', (0, Error, '/')),
+ ('1/', (1, Error, '/')),
+ ('/1', (0, Error, '/')),
+ ('""/', (2, Error, '/')),
+ # Unclosed multiline comments
+ ('/*', (0, Error, '/*')),
+ ('/**', (0, Error, '/**')),
+ ('/*/', (0, Error, '/*/')),
+ ('1/*', (1, Error, '/*')),
+ ('""/*', (2, Error, '/*')),
+ ('""/**', (2, Error, '/**')),
+ )
+)
+def test_json_comments_negative_matches(lexer_json, text, expected):
+    """Verify that unfinished or unclosed comments are parsed as errors."""
+
+ tokens = list(lexer_json.get_tokens_unprocessed(text))
+ assert expected in tokens
+
+ parsed_text = ''.join(token[2] for token in tokens)
+ assert parsed_text == text, 'Input and output texts must match!'
+
+
+def test_json_escape_backtracking(lexer_json):
+ """Confirm that there is no catastrophic backtracking in the lexer.
+
+ This no longer applies because the JSON lexer doesn't use regular expressions,
+ but the test is included to ensure no loss of functionality now or in the future.
+ """
+
+ fragment = r'{"\u00D0000\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\63CD'
+ start_time = time.time()
+ list(lexer_json.get_tokens(fragment))
+ assert time.time() - start_time < 1, 'The JSON lexer may have catastrophic backtracking'
+
+
+@pytest.mark.parametrize(
+ 'keyword',
+ (
+ 'base',
+ 'container',
+ 'context',
+ 'direction',
+ 'graph',
+ 'id',
+ 'import',
+ 'included',
+ 'index',
+ 'json',
+ 'language',
+ 'list',
+ 'nest',
+ 'none',
+ 'prefix',
+ 'propagate',
+ 'protected',
+ 'reverse',
+ 'set',
+ 'type',
+ 'value',
+ 'version',
+ 'vocab',
+ )
+)
+def test_json_ld_keywords_positive_match(lexer_json_ld, keyword):
+ """Validate that JSON-LD keywords are parsed correctly."""
+
+ tokens = list(lexer_json_ld.get_tokens_unprocessed('{"@%s": ""}' % keyword))
+ assert len(tokens) == 6
+ assert tokens[1][1] is Token.Name.Decorator
+ assert tokens[1][2] == '"@%s"' % keyword
+
+
+@pytest.mark.parametrize(
+ 'keyword',
+ (
+ '@bogus', # "@" does not guarantee a keyword match
+ '@bases', # Begins with the keyword "@base"
+ 'container', # Matches "container" but has no leading "@"
+ )
+)
+def test_json_ld_keywords_negative_match(lexer_json_ld, keyword):
+ """Validate that JSON-LD non-keywords are parsed correctly."""
+
+ tokens = list(lexer_json_ld.get_tokens_unprocessed('{"%s": ""}' % keyword))
+ assert len(tokens) == 6
+ assert tokens[1][1] is Token.Name.Tag
+ assert tokens[1][2] == '"%s"' % keyword
diff --git a/tests/test_devicetree_lexer.py b/tests/test_devicetree_lexer.py
new file mode 100644
index 0000000..e3aeb4d
--- /dev/null
+++ b/tests/test_devicetree_lexer.py
@@ -0,0 +1,32 @@
+"""
+ Devicetree Lexer Tests
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.devicetree import DevicetreeLexer
+from pygments.token import Token
+
+
+@pytest.fixture(scope="module")
+def lexer():
+ yield DevicetreeLexer()
+
+
+@pytest.mark.parametrize(
+ "fragment",
+ (
+ 'nodelabel: node@0 { foo = "bar"; };',
+ 'nodelabel: node { foo = "bar"; };',
+ 'nodelabel0: nodelabel1: node@0 { foo = "bar"; };',
+ ),
+)
+def test_fragment_out_of_root_node(lexer, fragment):
+ """Validate that a devicetree fragment out of a root node is parsed correctly."""
+
+ tokens = list(lexer.get_tokens(fragment))
+ assert all(x[0] != Token.Error for x in tokens)
diff --git a/tests/test_func.py b/tests/test_func.py
new file mode 100644
index 0000000..c494b9c
--- /dev/null
+++ b/tests/test_func.py
@@ -0,0 +1,44 @@
+import pytest
+from pygments.lexers.func import FuncLexer
+from pygments.token import Token, Name
+
+@pytest.fixture(scope='module')
+def lexer_func():
+ yield FuncLexer()
+
+
+@pytest.mark.parametrize('text', (
+ 'take(first)Entry', '"not_a_string', 'msg.sender', 'send_message,then_terminate', '_'))
+def test_func_not_identifier(lexer_func, text):
+ """Test text that should **not** be tokenized as identifier."""
+ assert list(lexer_func.get_tokens(text))[0] != (Name.Variable, text)
+
+
+@pytest.mark.parametrize('text', (
+ '`test identifier`', 'simple_identifier', 'query\'\'',
+ '_internal_value', 'get_pubkeys&signatures',
+ 'dict::udict_set_builder', '2+2=2*2', '-alsovalidname', '{hehehe}'))
+def test_func_identifier(lexer_func, text):
+ """Test text that should be tokenized as identifier."""
+ assert list(lexer_func.get_tokens(text))[0] == (Name.Variable, text)
+
+
+@pytest.mark.parametrize('text', (
+'`test identifier`(', 'simple_identifier(', 'query\'\'(',
+'_internal_value(', 'get_pubkeys&signatures(',
+'dict::udict_set_builder(', '2+2=2*2(', '-alsovalidname(', '{hehehe}('))
+def test_func_function(lexer_func, text):
+    """Test text that should be tokenized as a function name."""
+ assert list(lexer_func.get_tokens(text))[0] == (Name.Function, text[:-1])
+
+
+@pytest.mark.parametrize('text', ('0x0f', '0x1_2', '123', '0b10', '0xffff_fff', '1'))
+def test_func_number(lexer_func, text):
+ """Test text that should be tokenized as number."""
+ assert list(lexer_func.get_tokens(text))[0] == (Token.Literal.Number, text)
+
+
+@pytest.mark.parametrize('text', ('0x0f_m', '0X1_2', '12d3', '0b1_0f', '0bff_fff', '0b'))
+def test_func_not_number(lexer_func, text):
+ """Test text that should *not* be tokenized as number."""
+ assert list(lexer_func.get_tokens(text))[0] != (Token.Literal.Number, text)
diff --git a/tests/test_groff_formatter.py b/tests/test_groff_formatter.py
new file mode 100644
index 0000000..faad338
--- /dev/null
+++ b/tests/test_groff_formatter.py
@@ -0,0 +1,40 @@
+"""
+ Pygments Groff formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments import highlight
+from pygments.lexer import RegexLexer
+from pygments.style import Style
+from pygments.token import Token
+from pygments.formatters import GroffFormatter
+
+
+# FIXME: this tests a bug fix, but the basic functionality
+# is not tested thoroughly yet.
+
+class ToyLexer(RegexLexer):
+ tokens = {
+ "root": [
+ ("a", Token.Name),
+ ("b", Token.Name.Custom),
+ ],
+ }
+
+class ToyStyle(Style):
+ styles = {
+ Token.Name: "bold",
+ }
+
+
+expected = r""".nf
+\f[CR]
+\f[CB]a\f[CR]\f[CB]b\f[CR]
+
+.fi"""
+
+def test_inheritance_custom_tokens():
+ assert highlight("ab", ToyLexer(), GroffFormatter(style=ToyStyle)) == expected
diff --git a/tests/test_guess.py b/tests/test_guess.py
new file mode 100644
index 0000000..8adf161
--- /dev/null
+++ b/tests/test_guess.py
@@ -0,0 +1,184 @@
+"""
+ Pygments basic API tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pathlib import Path
+
+import pytest
+
+from pygments.lexers import guess_lexer, get_lexer_by_name
+from pygments.lexers.basic import CbmBasicV2Lexer
+from pygments.lexers.ecl import ECLLexer
+
+TESTDIR = Path(__file__).resolve().parent
+
+
+def get_input(lexer, filename):
+ return Path(TESTDIR, 'examplefiles', lexer, filename).read_text(encoding='utf-8')
+
+
+@pytest.mark.skip(reason="This is identified as T-SQL")
+def test_guess_lexer_fsharp():
+ lx = guess_lexer(get_input('fsharp', 'Deflate.fs'))
+ assert lx.__class__.__name__ == 'FSharpLexer'
+
+
+def test_guess_lexer_brainfuck():
+ lx = guess_lexer('>>[-]<<[->>+<<]')
+ assert lx.__class__.__name__ == 'BrainfuckLexer'
+
+
+def test_guess_lexer_singularity():
+ lx = guess_lexer(get_input('singularity', 'Singularity'))
+ assert lx.__class__.__name__ == 'SingularityLexer'
+
+
+@pytest.mark.skip(reason="This is identified as MIME")
+def test_guess_lexer_matlab():
+ lx = guess_lexer(r'A \ B')
+ assert lx.__class__.__name__ == 'OctaveLexer'
+
+
+@pytest.mark.skip(reason="This is identified as Python")
+def test_guess_lexer_hybris():
+ lx = guess_lexer(get_input('hybris', 'hybris_File.hy'))
+ assert lx.__class__.__name__ == 'HybrisLexer'
+
+
+def test_guess_lexer_forth():
+ lx = guess_lexer(get_input('forth', 'demo.frt'))
+ assert lx.__class__.__name__ == 'ForthLexer'
+
+
+def test_guess_lexer_modula2():
+ lx = guess_lexer(get_input('modula2', 'modula2_test_cases.def'))
+ assert lx.__class__.__name__ == 'Modula2Lexer'
+
+
+def test_guess_lexer_unicon():
+ lx = guess_lexer(get_input('unicon', 'example.icn'))
+ assert lx.__class__.__name__ == 'UcodeLexer'
+
+
+def test_guess_lexer_ezhil():
+ lx = guess_lexer(get_input('ezhil', 'ezhil_primefactors.n'))
+ assert lx.__class__.__name__ == 'EzhilLexer'
+
+
+def test_guess_lexer_gdscript():
+ lx = guess_lexer(get_input('gdscript', 'gdscript_example.gd'))
+ assert lx.__class__.__name__ == 'GDScriptLexer'
+
+
+def test_guess_lexer_gap():
+ lx = guess_lexer(get_input('gap', 'example.gd'))
+ assert lx.__class__.__name__ == 'GAPLexer'
+ lx = guess_lexer(get_input('gap', 'example.gi'))
+ assert lx.__class__.__name__ == 'GAPLexer'
+
+
+def test_guess_lexer_easytrieve():
+ lx = guess_lexer(get_input('easytrieve', 'example.ezt'))
+ assert lx.__class__.__name__ == 'EasytrieveLexer'
+ lx = guess_lexer(get_input('easytrieve', 'example.mac'))
+ assert lx.__class__.__name__ == 'EasytrieveLexer'
+
+
+def test_guess_lexer_jcl():
+ lx = guess_lexer(get_input('jcl', 'example.jcl'))
+ assert lx.__class__.__name__ == 'JclLexer'
+
+
+def test_guess_lexer_rexx():
+ lx = guess_lexer(get_input('rexx', 'example.rexx'))
+ assert lx.__class__.__name__ == 'RexxLexer'
+
+
+def test_easytrieve_can_guess_from_text():
+ lx = get_lexer_by_name('easytrieve')
+ assert lx.analyse_text('MACRO')
+ assert lx.analyse_text('\nMACRO')
+ assert lx.analyse_text(' \nMACRO')
+ assert lx.analyse_text(' \n MACRO')
+ assert lx.analyse_text('*\nMACRO')
+ assert lx.analyse_text('*\n *\n\n \n*\n MACRO')
+
+
+def test_rexx_can_guess_from_text():
+ lx = get_lexer_by_name('rexx')
+ assert lx.analyse_text('/* */') == pytest.approx(0.01)
+ assert lx.analyse_text('''/* Rexx */
+ say "hello world"''') == pytest.approx(1.0)
+ val = lx.analyse_text('/* */\n'
+ 'hello:pRoceduRe\n'
+ ' say "hello world"')
+ assert val > 0.5
+ val = lx.analyse_text('''/* */
+ if 1 > 0 then do
+ say "ok"
+ end
+ else do
+ say "huh?"
+ end''')
+ assert val > 0.2
+ val = lx.analyse_text('''/* */
+ greeting = "hello world!"
+ parse value greeting "hello" name "!"
+ say name''')
+ assert val > 0.2
+
+
+def test_guess_cmake_lexer_from_header():
+ headers = [
+ "CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR)",
+ "cmake_minimum_required(version 3.13) # CMake version check",
+ " CMAKE_MINIMUM_REQUIRED\t( VERSION 2.6 FATAL_ERROR ) ",
+ ]
+ for header in headers:
+ code = '\n'.join([
+ header,
+ 'project(example)',
+ 'set(CMAKE_CXX_STANDARD 14)',
+ 'set(SOURCE_FILES main.cpp)',
+ 'add_executable(example ${SOURCE_FILES})',
+ ])
+ lexer = guess_lexer(code)
+ assert lexer.__class__.__name__ == 'CMakeLexer', \
+ "header must be detected as CMake: %r" % header
+
+
+def test_guess_c_lexer():
+ code = '''
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ int main(void);
+
+ int main(void) {
+ uint8_t x = 42;
+ uint8_t y = x + 1;
+
+ /* exit 1 for success! */
+ return 1;
+ }
+ '''
+ lexer = guess_lexer(code)
+ assert lexer.__class__.__name__ == 'CLexer'
+
+
+def test_cbmbasicv2_analyse_text():
+ text = "10 PRINT \"PART 1\""
+ res = CbmBasicV2Lexer.analyse_text(text)
+ assert res == 0.2
+
+
+def test_ecl_analyze_text():
+ text = r"""
+ STRING ABC -> size32_t lenAbc, const char * abc;
+ """
+ res = ECLLexer.analyse_text(text)
+ assert res == 0.01
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
new file mode 100644
index 0000000..e1a02b2
--- /dev/null
+++ b/tests/test_html_formatter.py
@@ -0,0 +1,271 @@
+"""
+ Pygments HTML formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import re
+import tempfile
+from io import StringIO
+from os import path
+
+import pytest
+
+from pygments.formatters import HtmlFormatter, NullFormatter
+from pygments.formatters.html import escape_html
+from pygments.lexers import PythonLexer
+from pygments.style import Style
+
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_html_formatter.py')
+
+with open(TESTFILE, encoding='utf-8') as fp:
+ tokensource = list(PythonLexer().get_tokens(fp.read()))
+
+
+def test_correct_output():
+ hfmt = HtmlFormatter(nowrap=True)
+ houtfile = StringIO()
+ hfmt.format(tokensource, houtfile)
+
+ nfmt = NullFormatter()
+ noutfile = StringIO()
+ nfmt.format(tokensource, noutfile)
+
+ stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
+ escaped_text = escape_html(noutfile.getvalue())
+ assert stripped_html == escaped_text
+
+
+def test_external_css():
+ # test correct behavior
+ # CSS should be in /tmp directory
+ fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
+ # CSS should be in TESTDIR (TESTDIR is absolute)
+ fmt2 = HtmlFormatter(full=True, cssfile=path.join(TESTDIR, 'fmt2.css'),
+ outencoding='utf-8')
+ tfile = tempfile.NamedTemporaryFile(suffix='.html')
+ fmt1.format(tokensource, tfile)
+ try:
+ fmt2.format(tokensource, tfile)
+ assert path.isfile(path.join(TESTDIR, 'fmt2.css'))
+ except OSError:
+ # test directory not writable
+ pass
+ tfile.close()
+
+ assert path.isfile(path.join(path.dirname(tfile.name), 'fmt1.css'))
+ os.unlink(path.join(path.dirname(tfile.name), 'fmt1.css'))
+ try:
+ os.unlink(path.join(TESTDIR, 'fmt2.css'))
+ except OSError:
+ pass
+
+
+def test_all_options():
+ def check(optdict):
+ outfile = StringIO()
+ fmt = HtmlFormatter(**optdict)
+ fmt.format(tokensource, outfile)
+
+ for optdict in [
+ dict(nowrap=True),
+ dict(linenos=True, full=True),
+ dict(linenos=True, linespans='L'),
+ dict(hl_lines=[1, 5, 10, 'xxx']),
+ dict(hl_lines=[1, 5, 10], noclasses=True),
+ ]:
+ check(optdict)
+
+ for linenos in [False, 'table', 'inline']:
+ for noclasses in [False, True]:
+ for linenospecial in [0, 5]:
+ for anchorlinenos in [False, True]:
+ optdict = dict(
+ linenos=linenos,
+ noclasses=noclasses,
+ linenospecial=linenospecial,
+ anchorlinenos=anchorlinenos,
+ )
+ check(optdict)
+
+
+def test_linespans():
+ outfile = StringIO()
+ fmt = HtmlFormatter(linespans='L', anchorlinenos=True, linenos="inline")
+ fmt.format(tokensource, outfile)
+ html = outfile.getvalue()
+ assert re.search(r"""<span id="L-1">\s*<a href="#L-1"><span\s*class="linenos">\s*1</span></a>""", html)
+
+
+def test_lineanchors():
+ optdict = dict(lineanchors="foo")
+ outfile = StringIO()
+ fmt = HtmlFormatter(**optdict)
+ fmt.format(tokensource, outfile)
+ html = outfile.getvalue()
+ assert re.search("<pre>\\s*<span>\\s*</span>\\s*<a id=\"foo-1\" name=\"foo-1\" href=\"#foo-1\">", html)
+
+
+def test_lineanchors_with_startnum():
+ optdict = dict(lineanchors="foo", linenostart=5)
+ outfile = StringIO()
+ fmt = HtmlFormatter(**optdict)
+ fmt.format(tokensource, outfile)
+ html = outfile.getvalue()
+ assert re.search("<pre>\\s*<span>\\s*</span>\\s*<a id=\"foo-5\" name=\"foo-5\" href=\"#foo-5\">", html)
+
+
+def test_valid_output():
+ # test all available wrappers
+ fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
+ outencoding='utf-8')
+
+ handle, pathname = tempfile.mkstemp('.html')
+ with os.fdopen(handle, 'w+b') as tfile:
+ fmt.format(tokensource, tfile)
+ catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
+ try:
+ import subprocess
+ po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
+ stdout=subprocess.PIPE)
+ ret = po.wait()
+ output = po.stdout.read()
+ po.stdout.close()
+ except OSError:
+ # nsgmls not available
+ pass
+ else:
+ if ret:
+ print(output)
+ assert not ret, 'nsgmls run reported errors'
+
+ os.unlink(pathname)
+
+
+def test_get_style_defs_contains_pre_style():
+ style_defs = HtmlFormatter().get_style_defs().splitlines()
+ assert style_defs[0] == 'pre { line-height: 125%; }'
+
+
+def test_get_style_defs_contains_default_line_numbers_styles():
+ style_defs = HtmlFormatter().get_style_defs().splitlines()
+
+ assert style_defs[1] == (
+ 'td.linenos .normal '
+ '{ color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }'
+ )
+ assert style_defs[2] == (
+ 'span.linenos '
+ '{ color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }'
+ )
+
+
+def test_get_style_defs_contains_style_specific_line_numbers_styles():
+ class TestStyle(Style):
+ line_number_color = '#ff0000'
+ line_number_background_color = '#0000ff'
+ line_number_special_color = '#00ff00'
+ line_number_special_background_color = '#ffffff'
+
+ style_defs = HtmlFormatter(style=TestStyle).get_style_defs().splitlines()
+
+ assert style_defs[1] == (
+ 'td.linenos .normal '
+ '{ color: #ff0000; background-color: #0000ff; padding-left: 5px; padding-right: 5px; }'
+ )
+ assert style_defs[2] == (
+ 'span.linenos '
+ '{ color: #ff0000; background-color: #0000ff; padding-left: 5px; padding-right: 5px; }'
+ )
+ assert style_defs[3] == (
+ 'td.linenos .special '
+ '{ color: #00ff00; background-color: #ffffff; padding-left: 5px; padding-right: 5px; }'
+ )
+ assert style_defs[4] == (
+ 'span.linenos.special '
+ '{ color: #00ff00; background-color: #ffffff; padding-left: 5px; padding-right: 5px; }'
+ )
+
+
+@pytest.mark.parametrize(
+ "formatter_kwargs, style_defs_args, assert_starts_with, assert_contains",
+ [
+ [{}, [], ".", []],
+ [{"cssclass": "foo"}, [], ".foo .", []],
+ [{"cssclass": "foo"}, [".bar"], ".bar .", []],
+ [{"cssclass": "foo"}, [[".bar", ".baz"]], ".ba", [".bar .", ".baz ."]],
+ ]
+)
+def test_get_token_style_defs_uses_css_prefix(
+ formatter_kwargs, style_defs_args, assert_starts_with, assert_contains
+):
+ formatter = HtmlFormatter(**formatter_kwargs)
+
+ for line in formatter.get_token_style_defs(*style_defs_args):
+ assert line.startswith(assert_starts_with)
+ for s in assert_contains:
+ assert s in line
+
+
+def test_get_background_style_defs_uses_multiple_css_prefixes():
+ formatter = HtmlFormatter()
+
+ lines = formatter.get_background_style_defs([".foo", ".bar"])
+ assert lines[0].startswith(".foo .hll, .bar .hll {")
+ assert lines[1].startswith(".foo , .bar {")
+
+
+def test_unicode_options():
+ fmt = HtmlFormatter(title='Föö',
+ cssclass='bär',
+ cssstyles='div:before { content: \'bäz\' }',
+ encoding='utf-8')
+ handle, pathname = tempfile.mkstemp('.html')
+ with os.fdopen(handle, 'w+b') as tfile:
+ fmt.format(tokensource, tfile)
+
+
+def test_ctags():
+ try:
+ import ctags
+ except ImportError:
+ # we can't check without the ctags module, but at least check the exception
+ assert pytest.raises(
+ RuntimeError, HtmlFormatter, tagsfile='support/tags'
+ )
+ else:
+ # this tagfile says that test_ctags() is on line 165, even if it isn't
+ # anymore in the actual source
+ fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
+ tagurlformat='%(fname)s%(fext)s')
+ outfile = StringIO()
+ fmt.format(tokensource, outfile)
+ assert '<a href="test_html_formatter.py#L-165">test_ctags</a>' \
+ in outfile.getvalue()
+
+
+def test_filename():
+ optdict = dict(filename="test.py")
+ outfile = StringIO()
+ fmt = HtmlFormatter(**optdict)
+ fmt.format(tokensource, outfile)
+ html = outfile.getvalue()
+ assert re.search("<span class=\"filename\">test.py</span><pre>", html)
+
+
+def test_debug_token_types():
+ fmt_nod_token_types = HtmlFormatter(debug_token_types=False)
+ outfile_nod_token_types = StringIO()
+ fmt_nod_token_types.format(tokensource, outfile_nod_token_types)
+ html_nod_token_types = outfile_nod_token_types.getvalue()
+ assert '<span class="n" title="Name">TESTDIR</span>' not in html_nod_token_types
+
+ fmt_debug_token_types = HtmlFormatter(debug_token_types=True)
+ outfile_debug_token_types = StringIO()
+ fmt_debug_token_types.format(tokensource, outfile_debug_token_types)
+ html_debug_token_types = outfile_debug_token_types.getvalue()
+ assert '<span class="n" title="Name">TESTDIR</span>' in html_debug_token_types
diff --git a/tests/test_html_formatter_linenos_elements.py b/tests/test_html_formatter_linenos_elements.py
new file mode 100644
index 0000000..286b60c
--- /dev/null
+++ b/tests/test_html_formatter_linenos_elements.py
@@ -0,0 +1,63 @@
+import os
+from io import StringIO
+
+import pytest
+
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import PythonLexer
+
+from .support import structural_diff
+
+TESTDIR = os.path.dirname(os.path.abspath(__file__))
+EXPECTED_OUTPUT_DIR = os.path.join(TESTDIR, "html_linenos_expected_output")
+CODE = list(PythonLexer().get_tokens("# a\n# b\n# c"))
+
+
+def single_line(text):
+ return "".join(l.strip() for l in text.splitlines())
+
+
+# Note: option `anchorlinenos` is currently ignored for `linenos=inline`
+@pytest.mark.parametrize("linenos", ["inline", "table"])
+@pytest.mark.parametrize("noclasses", ["False", "True"])
+@pytest.mark.parametrize("linenostep", ["1", "2"])
+@pytest.mark.parametrize("linenostart", ["1", "8"])
+@pytest.mark.parametrize("linenospecial", ["0", "3"])
+@pytest.mark.parametrize("anchorlinenos", ["False", "True"])
+@pytest.mark.parametrize("filename", ["", "testfilename"])
+def test_linenos_elements(
+ linenos, noclasses, linenostep, linenostart, linenospecial, anchorlinenos, filename
+):
+ options = dict(
+ linenos=linenos,
+ noclasses=noclasses,
+ linenostep=linenostep,
+ linenostart=linenostart,
+ linenospecial=linenospecial,
+ anchorlinenos=anchorlinenos,
+ filename=filename,
+ )
+
+ output = StringIO()
+ fmt = HtmlFormatter(**options)
+ fmt.format(CODE, output)
+ html = output.getvalue()
+
+ filename_parts = []
+ filename_parts.append(linenos)
+ filename_parts.append("nocls" if noclasses == "True" else "cls")
+ filename_parts.append("step_" + linenostep)
+ filename_parts.append("start_" + linenostart)
+ filename_parts.append("special_" + linenospecial)
+ filename_parts.append("anchor" if anchorlinenos == "True" else "noanchor")
+ filename_parts.append("filename" if filename else "nofilename")
+ expected_html_filename = "_".join(filename_parts) + ".html"
+
+ # with open(os.path.join(EXPECTED_OUTPUT_DIR, expected_html_filename), 'w') as f:
+ # import bs4 as BeautifulSoup
+ # f.write(str(BeautifulSoup.BeautifulSoup(html, 'html.parser')))
+
+ with open(os.path.join(EXPECTED_OUTPUT_DIR, expected_html_filename)) as f:
+ expected_html = f.read()
+
+ structural_diff.structural_diff(html, expected_html)
diff --git a/tests/test_html_lexer.py b/tests/test_html_lexer.py
new file mode 100644
index 0000000..fe99149
--- /dev/null
+++ b/tests/test_html_lexer.py
@@ -0,0 +1,131 @@
+"""
+ HTML Lexer Tests
+ ~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import time
+
+import pytest
+
+from pygments.lexers.html import HtmlLexer
+from pygments.token import Token
+
+MAX_HL_TIME = 10
+
+
+@pytest.fixture(scope='module')
+def lexer_html():
+ yield HtmlLexer()
+
+
+def test_happy_javascript_fragment(lexer_html):
+ """valid, even long Javascript fragments should still get parsed ok"""
+
+ fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*2000+"</script>"
+ start_time = time.time()
+ tokens = list(lexer_html.get_tokens(fragment))
+ assert all(x[1] != Token.Error for x in tokens)
+ assert time.time() - start_time < MAX_HL_TIME, \
+ 'The HTML lexer might have an expensive happy-path script case'
+
+
+def test_happy_css_fragment(lexer_html):
+ """valid, even long CSS fragments should still get parsed ok"""
+
+ fragment = "<style>"+".ui-helper-hidden{display:none}"*2000+"</style>"
+ start_time = time.time()
+ tokens = list(lexer_html.get_tokens(fragment))
+ assert all(x[1] != Token.Error for x in tokens)
+ assert time.time() - start_time < MAX_HL_TIME, \
+ 'The HTML lexer might have an expensive happy-path style case'
+
+
+def test_long_unclosed_javascript_fragment(lexer_html):
+ """unclosed, long Javascript fragments should parse quickly"""
+
+ reps = 2000
+ fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*reps
+ start_time = time.time()
+ tokens = list(lexer_html.get_tokens(fragment))
+ assert time.time() - start_time < MAX_HL_TIME, \
+ 'The HTML lexer might have an expensive error script case'
+ tokens_intro = [
+ (Token.Punctuation, '<'),
+ (Token.Name.Tag, 'script'),
+ (Token.Text, ' '),
+ (Token.Name.Attribute, 'type'),
+ (Token.Operator, '='),
+ (Token.Literal.String, '"text/javascript"'),
+ (Token.Punctuation, '>'),
+ ]
+ tokens_body = [
+ (Token.Name.Other, 'alert'),
+ (Token.Punctuation, '('),
+ (Token.Literal.String.Double, '"hi"'),
+ (Token.Punctuation, ')'),
+ (Token.Punctuation, ';'),
+ ]
+
+ # make sure we get the right opening tokens
+ assert tokens[:len(tokens_intro)] == tokens_intro
+ # and make sure we get the right body tokens even though the script is
+ # unclosed
+ assert tokens[len(tokens_intro):-1] == tokens_body * reps
+ # and of course, the newline we get for free from get_tokens
+ assert tokens[-1] == (Token.Text.Whitespace, "\n")
+
+
+def test_long_unclosed_css_fragment(lexer_html):
+ """unclosed, long CSS fragments should parse quickly"""
+
+ reps = 2000
+ fragment = "<style>"+".ui-helper-hidden{display:none}"*reps
+ start_time = time.time()
+ tokens = list(lexer_html.get_tokens(fragment))
+ assert time.time() - start_time < MAX_HL_TIME, \
+ 'The HTML lexer might have an expensive error style case'
+
+ tokens_intro = [
+ (Token.Punctuation, '<'),
+ (Token.Name.Tag, 'style'),
+ (Token.Punctuation, '>'),
+ ]
+ tokens_body = [
+ (Token.Punctuation, '.'),
+ (Token.Name.Class, 'ui-helper-hidden'),
+ (Token.Punctuation, '{'),
+ (Token.Keyword, 'display'),
+ (Token.Punctuation, ':'),
+ (Token.Keyword.Constant, 'none'),
+ (Token.Punctuation, '}'),
+ ]
+
+ # make sure we get the right opening tokens
+ assert tokens[:len(tokens_intro)] == tokens_intro
+ # and make sure we get the right body tokens even though the style block is
+ # unclosed
+ assert tokens[len(tokens_intro):-1] == tokens_body * reps
+ # and of course, the newline we get for free from get_tokens
+ assert tokens[-1] == (Token.Text.Whitespace, "\n")
+
+
+def test_unclosed_fragment_with_newline_recovery(lexer_html):
+ """unclosed Javascript fragments should recover on the next line"""
+
+ fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*20+"\n<div>hi</div>"
+ tokens = list(lexer_html.get_tokens(fragment))
+ recovery_tokens = [
+ (Token.Punctuation, '<'),
+ (Token.Name.Tag, 'div'),
+ (Token.Punctuation, '>'),
+ (Token.Text, 'hi'),
+ (Token.Punctuation, '<'),
+ (Token.Punctuation, '/'),
+ (Token.Name.Tag, 'div'),
+ (Token.Punctuation, '>'),
+ (Token.Text, '\n'),
+ ]
+ assert tokens[-1*len(recovery_tokens):] == recovery_tokens
diff --git a/tests/test_inherit.py b/tests/test_inherit.py
new file mode 100644
index 0000000..a276378
--- /dev/null
+++ b/tests/test_inherit.py
@@ -0,0 +1,101 @@
+"""
+ Tests for inheritance in RegexLexer
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, inherit
+from pygments.token import Text
+
+
+class One(RegexLexer):
+ tokens = {
+ 'root': [
+ ('a', Text),
+ ('b', Text),
+ ],
+ }
+
+
+class Two(One):
+ tokens = {
+ 'root': [
+ ('x', Text),
+ inherit,
+ ('y', Text),
+ ],
+ }
+
+
+class Three(Two):
+ tokens = {
+ 'root': [
+ ('i', Text),
+ inherit,
+ ('j', Text),
+ ],
+ }
+
+
+class Beginning(Two):
+ tokens = {
+ 'root': [
+ inherit,
+ ('m', Text),
+ ],
+ }
+
+
+class End(Two):
+ tokens = {
+ 'root': [
+ ('m', Text),
+ inherit,
+ ],
+ }
+
+
+class Empty(One):
+ tokens = {}
+
+
+class Skipped(Empty):
+ tokens = {
+ 'root': [
+ ('x', Text),
+ inherit,
+ ('y', Text),
+ ],
+ }
+
+
+def test_single_inheritance_position():
+ t = Two()
+ pats = [x[0].__self__.pattern for x in t._tokens['root']]
+ assert ['x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_beginning():
+ t = Beginning()
+ pats = [x[0].__self__.pattern for x in t._tokens['root']]
+ assert ['x', 'a', 'b', 'y', 'm'] == pats
+
+
+def test_multi_inheritance_end():
+ t = End()
+ pats = [x[0].__self__.pattern for x in t._tokens['root']]
+ assert ['m', 'x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_position():
+ t = Three()
+ pats = [x[0].__self__.pattern for x in t._tokens['root']]
+ assert ['i', 'x', 'a', 'b', 'y', 'j'] == pats
+
+
+def test_single_inheritance_with_skip():
+ t = Skipped()
+ pats = [x[0].__self__.pattern for x in t._tokens['root']]
+ assert ['x', 'a', 'b', 'y'] == pats
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
new file mode 100644
index 0000000..c930166
--- /dev/null
+++ b/tests/test_irc_formatter.py
@@ -0,0 +1,30 @@
+"""
+ Pygments IRC formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from io import StringIO
+
+from pygments.lexers import PythonLexer
+from pygments.formatters import IRCFormatter
+
+tokensource = list(PythonLexer().get_tokens("lambda x: 123"))
+newlinetokensource = list(PythonLexer().get_tokens("from \\\n\\\n os import path\n"))
+
+def test_correct_output():
+ hfmt = IRCFormatter()
+ houtfile = StringIO()
+ hfmt.format(tokensource, houtfile)
+
+ assert '\x0302lambda\x03 x: \x0302123\x03\n' == houtfile.getvalue()
+
+def test_linecount_output():
+ hfmt = IRCFormatter(linenos = True)
+ houtfile = StringIO()
+ hfmt.format(newlinetokensource, houtfile)
+
+ expected_out = '0001: \x0302from\x03 \\\n0002: \\\n0003: \x1d\x0310os\x03\x1d \x0302import\x03 path\n0004: '
+ assert expected_out == houtfile.getvalue()
diff --git a/tests/test_java.py b/tests/test_java.py
new file mode 100644
index 0000000..a50e862
--- /dev/null
+++ b/tests/test_java.py
@@ -0,0 +1,40 @@
+"""
+ Basic JavaLexer Test
+ ~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import time
+
+import pytest
+
+from pygments.token import String
+from pygments.lexers import JavaLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield JavaLexer()
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '""', '"abc"', '"ひらがな"', '"123"',
+ '"\\\\"', '"\\t"' '"\\""',
+ ),
+)
+def test_string_literals_positive_match(lexer, text):
+ """Test positive matches for string literals."""
+ tokens = list(lexer.get_tokens_unprocessed(text))
+ assert all([token is String for _, token, _ in tokens])
+ assert ''.join([value for _, _, value in tokens]) == text
+
+
+def test_string_literals_backtracking(lexer):
+ """Test catastrophic backtracking for string literals."""
+ start_time = time.time()
+ list(lexer.get_tokens_unprocessed('"' + '\\' * 100))
+ assert time.time() - start_time < 1, 'possible backtracking bug'
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
new file mode 100644
index 0000000..05f74e2
--- /dev/null
+++ b/tests/test_javascript.py
@@ -0,0 +1,84 @@
+"""
+ Javascript tests
+ ~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.javascript import JavascriptLexer
+from pygments.token import Number
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield JavascriptLexer()
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '1', '1.', '.1', '1.1', '1e1', '1E1', '1e+1', '1E-1', '1.e1', '.1e1',
+ '0888', # octal prefix with non-octal numbers
+ )
+)
+def test_float_literal_positive_matches(lexer, text):
+ """Test literals that should be tokenized as float literals."""
+ assert list(lexer.get_tokens(text))[0] == (Number.Float, text)
+
+
+@pytest.mark.parametrize('text', ('.\u0b6a', '.', '1..', '1n', '1ee', '1e',
+ '1e-', '1e--1', '1e++1', '1e1.0'))
+def test_float_literals_negative_matches(lexer, text):
+ """Test text that should **not** be tokenized as float literals."""
+ assert list(lexer.get_tokens(text))[0] != (Number.Float, text)
+
+
+@pytest.mark.parametrize('text', ('0n', '123n'))
+def test_integer_literal_positive_matches(lexer, text):
+ """Test literals that should be tokenized as integer literals."""
+ assert list(lexer.get_tokens(text))[0] == (Number.Integer, text)
+
+
+@pytest.mark.parametrize('text', ('1N', '1', '1.0'))
+def test_integer_literals_negative_matches(lexer, text):
+ """Test text that should **not** be tokenized as integer literals."""
+ assert list(lexer.get_tokens(text))[0] != (Number.Integer, text)
+
+
+@pytest.mark.parametrize('text', ('0b01', '0B10n'))
+def test_binary_literal_positive_matches(lexer, text):
+ """Test literals that should be tokenized as binary literals."""
+ assert list(lexer.get_tokens(text))[0] == (Number.Bin, text)
+
+
+@pytest.mark.parametrize('text', ('0b0N', '0b', '0bb', '0b2'))
+def test_binary_literals_negative_matches(lexer, text):
+ """Test text that should **not** be tokenized as binary literals."""
+ assert list(lexer.get_tokens(text))[0] != (Number.Bin, text)
+
+
+@pytest.mark.parametrize('text', ('017', '071n', '0o11', '0O77n'))
+def test_octal_literal_positive_matches(lexer, text):
+ """Test literals that should be tokenized as octal literals."""
+ assert list(lexer.get_tokens(text))[0] == (Number.Oct, text)
+
+
+@pytest.mark.parametrize('text', ('01N', '089', '098n', '0o', '0OO', '0o88', '0O88n'))
+def test_octal_literals_negative_matches(lexer, text):
+ """Test text that should **not** be tokenized as octal literals."""
+ assert list(lexer.get_tokens(text))[0] != (Number.Oct, text)
+
+
+@pytest.mark.parametrize('text', ('0x01', '0Xefn', '0x0EF'))
+def test_hexadecimal_literal_positive_matches(lexer, text):
+ """Test literals that should be tokenized as hexadecimal literals."""
+ assert list(lexer.get_tokens(text))[0] == (Number.Hex, text)
+
+
+@pytest.mark.parametrize('text', ('0x0N', '0x', '0Xx', '0xg', '0xhn'))
+def test_hexadecimal_literals_negative_matches(lexer, text):
+ """Test text that should **not** be tokenized as hexadecimal literals."""
+ assert list(lexer.get_tokens(text))[0] != (Number.Hex, text)
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
new file mode 100644
index 0000000..42c5eda
--- /dev/null
+++ b/tests/test_latex_formatter.py
@@ -0,0 +1,107 @@
+"""
+ Pygments LaTeX formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import tempfile
+from os import path
+from io import StringIO
+from textwrap import dedent
+
+import pytest
+
+from pygments.formatters import LatexFormatter
+from pygments.formatters.latex import LatexEmbeddedLexer
+from pygments.lexers import PythonLexer, PythonConsoleLexer
+from pygments.token import Token
+
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')
+
+
+def test_correct_output():
+ with open(TESTFILE) as fp:
+ tokensource = list(PythonLexer().get_tokens(fp.read()))
+ hfmt = LatexFormatter(nowrap=True)
+ houtfile = StringIO()
+ hfmt.format(tokensource, houtfile)
+
+ assert r'\begin{Verbatim}' not in houtfile.getvalue()
+ assert r'\end{Verbatim}' not in houtfile.getvalue()
+
+
+def test_valid_output():
+ with open(TESTFILE) as fp:
+ tokensource = list(PythonLexer().get_tokens(fp.read()))
+ fmt = LatexFormatter(full=True, encoding='latin1')
+
+ handle, pathname = tempfile.mkstemp('.tex')
+ # place all output files in /tmp too
+ old_wd = os.getcwd()
+ os.chdir(os.path.dirname(pathname))
+ tfile = os.fdopen(handle, 'wb')
+ fmt.format(tokensource, tfile)
+ tfile.close()
+ try:
+ import subprocess
+ po = subprocess.Popen(['latex', '-interaction=nonstopmode',
+ pathname], stdout=subprocess.PIPE)
+ ret = po.wait()
+ output = po.stdout.read()
+ po.stdout.close()
+ except OSError as e:
+ # latex not available
+ pytest.skip(str(e))
+ else:
+ if ret:
+ print(output)
+ assert not ret, 'latex run reported errors'
+
+ os.unlink(pathname)
+ os.chdir(old_wd)
+
+
+def test_embedded_lexer():
+ # Latex surrounded by '|' should be Escaped
+ lexer = LatexEmbeddedLexer('|', '|', PythonConsoleLexer())
+
+ # similar to gh-1516
+ src = dedent("""\
+ >>> x = 1
+ >>> y = mul(x, |$z^2$|) # these |pipes| are untouched
+ >>> y
+ |$1 + z^2$|""")
+
+ assert list(lexer.get_tokens(src)) == [
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Name, 'x'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Text.Whitespace, '\n'),
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Name, 'y'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Name, 'mul'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'x'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Escape, '$z^2$'),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Comment.Single, '# these |pipes| are untouched'), # note: not Token.Escape
+ (Token.Text.Whitespace, '\n'),
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Name, 'y'),
+ (Token.Text.Whitespace, '\n'),
+ (Token.Escape, '$1 + z^2$'),
+ (Token.Generic.Output, '\n'),
+ ]
diff --git a/tests/test_markdown_lexer.py b/tests/test_markdown_lexer.py
new file mode 100644
index 0000000..46b2911
--- /dev/null
+++ b/tests/test_markdown_lexer.py
@@ -0,0 +1,178 @@
+"""
+ Pygments Markdown lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+from pygments.token import Generic, Token, String
+
+from pygments.lexers.markup import MarkdownLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield MarkdownLexer()
+
+
+def assert_same_text(lexer, text):
+ """Show that lexed markdown does not remove any content. """
+ tokens = list(lexer.get_tokens_unprocessed(text))
+ output = ''.join(t[2] for t in tokens)
+ assert text == output
+
+
+def test_code_fence(lexer):
+ assert_same_text(lexer, r'```\nfoo\n```\n')
+
+
+def test_code_fence_gsm(lexer):
+ assert_same_text(lexer, r'```markdown\nfoo\n```\n')
+
+
+def test_code_fence_gsm_with_no_lexer(lexer):
+ assert_same_text(lexer, r'```invalid-lexer\nfoo\n```\n')
+
+
+def test_invalid_atx_heading(lexer):
+ fragments = (
+ '#',
+ 'a #',
+ '*#',
+ )
+
+ for fragment in fragments:
+ for token, _ in lexer.get_tokens(fragment):
+ assert token != Generic.Heading
+
+
+def test_atx_heading(lexer):
+ fragments = (
+ '#Heading',
+ '# Heading',
+ '# Another heading',
+ '# Another # heading',
+ '# Heading #',
+ )
+
+ for fragment in fragments:
+ tokens = [
+ (Generic.Heading, fragment),
+ (Token.Text, '\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_invalid_atx_subheading(lexer):
+ fragments = (
+ '##',
+ 'a ##',
+ '*##',
+ '####### too many hashes'
+ )
+
+ for fragment in fragments:
+ for token, _ in lexer.get_tokens(fragment):
+ assert token != Generic.Subheading
+
+
+def test_atx_subheading(lexer):
+ fragments = (
+ '##Subheading',
+ '## Subheading',
+ '### Subheading',
+ '#### Subheading',
+ '##### Subheading',
+ '###### Subheading',
+ '## Another subheading',
+ '## Another ## subheading',
+ '###### Subheading #',
+ '###### Subheading ######',
+ )
+
+ for fragment in fragments:
+ tokens = [
+ (Generic.Subheading, fragment),
+ (Token.Text, '\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_invalid_setext_heading(lexer):
+ fragments = (
+ 'Heading\n',
+ 'Heading\n_',
+ 'Heading\n =====',
+ 'Heading\na=====',
+ '=====',
+ '\n=\n',
+ 'Heading\n=====Text'
+ )
+
+ for fragment in fragments:
+ for token, _ in lexer.get_tokens(fragment):
+ assert token != Generic.Heading
+
+
+def test_setext_heading(lexer):
+ fragments = (
+ 'Heading\n=',
+ 'Heading\n=======',
+ 'Heading\n==========',
+ )
+
+ for fragment in fragments:
+ tokens = [
+ (Generic.Heading, fragment.split('\n')[0]),
+ (Token.Text, '\n'),
+ (Generic.Heading, fragment.split('\n')[1]),
+ (Token.Text, '\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_invalid_setext_subheading(lexer):
+ fragments = (
+ 'Subheading\n',
+ 'Subheading\n_',
+ 'Subheading\n -----',
+ 'Subheading\na-----',
+ '-----',
+ '\n-\n',
+ 'Subheading\n-----Text'
+ )
+
+ for fragment in fragments:
+ for token, _ in lexer.get_tokens(fragment):
+ assert token != Generic.Subheading
+
+
+def test_setext_subheading(lexer):
+ fragments = (
+ 'Subheading\n-',
+ 'Subheading\n----------',
+ 'Subheading\n-----------',
+ )
+
+ for fragment in fragments:
+ tokens = [
+ (Generic.Subheading, fragment.split('\n')[0]),
+ (Token.Text, '\n'),
+ (Generic.Subheading, fragment.split('\n')[1]),
+ (Token.Text, '\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_invalid_code_block(lexer):
+ fragments = (
+ '```code```',
+ 'prefix not allowed before ```\ncode block\n```',
+ ' code',
+ )
+
+ for fragment in fragments:
+ for token, _ in lexer.get_tokens(fragment):
+ assert token != String.Backtick
diff --git a/tests/test_modeline.py b/tests/test_modeline.py
new file mode 100644
index 0000000..86acbd7
--- /dev/null
+++ b/tests/test_modeline.py
@@ -0,0 +1,20 @@
+"""
+ Tests for the vim modeline feature
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments import modeline
+
+
+def test_modelines():
+ for buf in [
+ 'vi: ft=python' + '\n' * 8,
+ 'vi: ft=python' + '\n' * 8,
+ '\n\n\n\nvi=8: syntax=python' + '\n' * 8,
+ '\n' * 8 + 'ex: filetype=python',
+ '\n' * 8 + 'vim: some,other,syn=python\n\n\n\n'
+ ]:
+ assert modeline.get_filetype_from_buffer(buf) == 'python'
diff --git a/tests/test_mysql.py b/tests/test_mysql.py
new file mode 100644
index 0000000..39da8f5
--- /dev/null
+++ b/tests/test_mysql.py
@@ -0,0 +1,273 @@
+"""
+ Pygments MySQL lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.sql import MySqlLexer
+
+from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
+ Punctuation, String, Whitespace
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield MySqlLexer()
+
+
+@pytest.mark.parametrize('text', ('1', '22', '22 333', '22 a', '22+', '22)',
+ '22\n333', '22\r\n333'))
+def test_integer_literals_positive_match(lexer, text):
+ """Validate that integer literals are tokenized as integers."""
+ token = list(lexer.get_tokens(text))[0]
+ assert token[0] == Number.Integer
+ assert token[1] in {'1', '22'}
+
+
+@pytest.mark.parametrize('text', ('1a', '1A', '1.', '1ひ', '1$', '1_',
+ '1\u0080', '1\uffff'))
+def test_integer_literals_negative_match(lexer, text):
+ """Validate that non-integer texts are not matched as integers."""
+ assert list(lexer.get_tokens(text))[0][0] != Number.Integer
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '.123', '1.23', '123.',
+ '1e10', '1.0e10', '1.e-10', '.1e+10',
+ ),
+)
+def test_float_literals(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Number.Float, text)
+
+
+@pytest.mark.parametrize('text', ("X'0af019'", "x'0AF019'", "0xaf019"))
+def test_hexadecimal_literals(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Number.Hex, text)
+
+
+@pytest.mark.parametrize('text', ("B'010'", "b'010'", "0b010"))
+def test_binary_literals(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Number.Bin, text)
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ "{d'2020-01-01'}", "{ d ' 2020^01@01 ' }",
+ "{t'8 9:10:11'}", "{ t ' 09:10:11.12 ' }", "{ t ' 091011 ' }",
+ '{ts"2020-01-01 09:10:11"}', "{ ts ' 2020@01/01 09:10:11 ' }",
+ ),
+)
+def test_temporal_literals(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Literal.Date, text)
+
+
+@pytest.mark.parametrize(
+ 'text, expected_types',
+ (
+ (r"'a'", (String.Single,) * 3),
+ (r"""'""'""", (String.Single,) * 3),
+ (r"''''", (String.Single, String.Escape, String.Single)),
+ (r"'\''", (String.Single, String.Escape, String.Single)),
+ (r'"a"', (String.Double,) * 3),
+ (r'''"''"''', (String.Double,) * 3),
+ (r'""""', (String.Double, String.Escape, String.Double)),
+ (r'"\""', (String.Double, String.Escape, String.Double)),
+ ),
+)
+def test_string_literals(lexer, text, expected_types):
+ tokens = list(lexer.get_tokens(text))[:len(expected_types)]
+ assert all(t[0] == e for t, e in zip(tokens, expected_types))
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ "@a", "@1", "@._.$",
+ "@'?'", """@'abc''def"`ghi'""",
+ '@"#"', '''@"abc""def'`ghi"''',
+ '@`^`', """@`abc``def'"ghi`""",
+ "@@timestamp",
+ "@@session.auto_increment_offset",
+ "@@global.auto_increment_offset",
+ "@@persist.auto_increment_offset",
+ "@@persist_only.auto_increment_offset",
+ '?',
+ ),
+)
+def test_variables(lexer, text):
+ tokens = list(lexer.get_tokens(text))
+ assert all(t[0] == Name.Variable for t in tokens[:-1])
+ assert ''.join([t[1] for t in tokens]).strip() == text.strip()
+
+
+@pytest.mark.parametrize('text', ('true', 'false', 'null', 'unknown'))
+def test_constants(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Name.Constant, text)
+
+
+@pytest.mark.parametrize('text', ('-- abc', '--\tabc', '#abc'))
+def test_comments_single_line(lexer, text):
+ # Test the standalone comment.
+ tokens = list(lexer.get_tokens(text))
+ assert tokens[0] == (Comment.Single, text)
+
+ # Test the comment with mixed tokens.
+ tokens = list(lexer.get_tokens('select' + text + '\nselect'))
+ assert tokens[0] == (Keyword, 'select')
+ assert tokens[1] == (Comment.Single, text)
+ assert tokens[-2] == (Keyword, 'select')
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '/**/a', '/*a*b/c*/a', '/*\nabc\n*/a',
+ '/* /* */a'
+ )
+)
+def test_comments_multi_line(lexer, text):
+ tokens = list(lexer.get_tokens(text))
+ assert all(token[0] == Comment.Multiline for token in tokens[:-2])
+ assert ''.join(token[1] for token in tokens).strip() == text.strip()
+
+ # Validate nested comments are not supported.
+ assert tokens[-2][0] != Comment.Multiline
+
+
+@pytest.mark.parametrize(
+ 'text', ('BKA', 'SEMIJOIN'))
+def test_optimizer_hints(lexer, text):
+ good = '/*+ ' + text + '(), */'
+ ignore = '/* ' + text + ' */'
+ bad1 = '/*+ a' + text + '() */'
+ bad2 = '/*+ ' + text + 'a */'
+ assert (Comment.Preproc, text) in lexer.get_tokens(good)
+ assert (Comment.Preproc, text) not in lexer.get_tokens(ignore)
+ assert (Comment.Preproc, text) not in lexer.get_tokens(bad1)
+ assert (Comment.Preproc, text) not in lexer.get_tokens(bad2)
+
+
+@pytest.mark.parametrize(
+ 'text, expected_types',
+ (
+ # SET exceptions
+ ('SET', (Keyword,)),
+ ('SET abc = 1;', (Keyword,)),
+ ('SET @abc = 1;', (Keyword,)),
+ ('CHARACTER SET latin1', (Keyword, Whitespace, Keyword,)),
+ ('SET("r", "g", "b")', (Keyword.Type, Punctuation)),
+ ('SET ("r", "g", "b")', (Keyword.Type, Whitespace, Punctuation)),
+ ),
+)
+def test_exceptions(lexer, text, expected_types):
+ tokens = list(lexer.get_tokens(text))[:len(expected_types)]
+ assert all(t[0] == e for t, e in zip(tokens, expected_types))
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ 'SHOW', 'CREATE', 'ALTER', 'DROP',
+ 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'WHERE', 'GROUP', 'ORDER', 'BY', 'AS',
+ 'DISTINCT', 'JOIN', 'WITH', 'RECURSIVE',
+ 'PARTITION', 'NTILE', 'MASTER_PASSWORD', 'XA',
+ 'REQUIRE_TABLE_PRIMARY_KEY_CHECK', 'STREAM',
+ ),
+)
+def test_keywords(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Keyword, text)
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ # Standard
+ 'INT(', 'VARCHAR(', 'ENUM(', 'DATETIME', 'GEOMETRY', 'POINT', 'JSON',
+ # Aliases and compatibility
+ 'FIXED', 'MEDIUMINT', 'INT3', 'REAL', 'SERIAL',
+ 'LONG', 'NATIONAL', 'PRECISION', 'VARYING',
+ ),
+)
+def test_data_types(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Keyword.Type, text.strip('('))
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ # Common
+ 'CAST', 'CONCAT_WS', 'DAYNAME', 'IFNULL', 'NOW', 'SUBSTR',
+ # Less common
+ 'CAN_ACCESS_COLUMN', 'JSON_CONTAINS_PATH', 'ST_GEOMFROMGEOJSON',
+ ),
+)
+def test_functions(lexer, text):
+ assert list(lexer.get_tokens(text + '('))[0] == (Name.Function, text)
+ assert list(lexer.get_tokens(text + ' ('))[0] == (Name.Function, text)
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ 'abc_$123', '上市年限', 'ひらがな', '123_$abc', '123ひらがな',
+ ),
+)
+def test_schema_object_names_unquoted(lexer, text):
+ tokens = list(lexer.get_tokens(text))[:-1]
+ assert all(token[0] == Name for token in tokens)
+ assert ''.join(token[1] for token in tokens) == text
+
+
+@pytest.mark.parametrize(
+ 'text',
+ (
+ '`a`', '`1`', '`上市年限`', '`ひらがな`', '`select`', '`concat(`',
+ '`-- `', '`/*`', '`#`',
+ ),
+)
+def test_schema_object_names_quoted(lexer, text):
+ tokens = list(lexer.get_tokens(text))[:-1]
+ assert tokens[0] == (Name.Quoted, '`')
+ assert tokens[1] == (Name.Quoted, text[1:-1])
+ assert tokens[2] == (Name.Quoted, '`')
+ assert ''.join(token[1] for token in tokens) == text
+
+
+@pytest.mark.parametrize('text', ('````', ))
+def test_schema_object_names_quoted_escaped(lexer, text):
+ """Test quoted schema object names with escape sequences."""
+ tokens = list(lexer.get_tokens(text))[:-1]
+ assert tokens[0] == (Name.Quoted, '`')
+ assert tokens[1] == (Name.Quoted.Escape, text[1:-1])
+ assert tokens[2] == (Name.Quoted, '`')
+ assert ''.join(token[1] for token in tokens) == text
+
+
+@pytest.mark.parametrize(
+ 'text',
+ ('+', '*', '/', '%', '&&', ':=', '!', '<', '->>', '^', '|', '~'),
+)
+def test_operators(lexer, text):
+ assert list(lexer.get_tokens(text))[0] == (Operator, text)
+
+
+@pytest.mark.parametrize(
+ 'text, expected_types',
+ (
+ ('abc.efg', (Name, Punctuation, Name)),
+ ('abc,efg', (Name, Punctuation, Name)),
+ ('MAX(abc)', (Name.Function, Punctuation, Name, Punctuation)),
+ ('efg;', (Name, Punctuation)),
+ ),
+)
+def test_punctuation(lexer, text, expected_types):
+ tokens = list(lexer.get_tokens(text))[:len(expected_types)]
+ assert all(t[0] == e for t, e in zip(tokens, expected_types))
diff --git a/tests/test_pangomarkup_formatter.py b/tests/test_pangomarkup_formatter.py
new file mode 100644
index 0000000..e468546
--- /dev/null
+++ b/tests/test_pangomarkup_formatter.py
@@ -0,0 +1,44 @@
+"""
+ Pygments Pango Markup formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments import highlight
+from pygments.formatters import PangoMarkupFormatter
+from pygments.lexers import JavascriptLexer
+
+INPUT = r"""
+function foobar(a, b) {
+ if (a > b) {
+ return a & b;
+ }
+ if (a < b) {
+ return true;
+ }
+ console.log("single quote ' and double quote \"")
+ console.log('single quote \' and double quote "')
+ // comment with äöü ç
+}
+"""
+
+OUTPUT = r"""<tt><span fgcolor="#"><b>function</b></span><span fgcolor="#"> </span>foobar(a,<span fgcolor="#"> </span>b)<span fgcolor="#"> </span>{<span fgcolor="#">
+ </span><span fgcolor="#"><b>if</b></span><span fgcolor="#"> </span>(a<span fgcolor="#"> </span><span fgcolor="#">></span><span fgcolor="#"> </span>b)<span fgcolor="#"> </span>{<span fgcolor="#">
+ </span><span fgcolor="#"><b>return</b></span><span fgcolor="#"> </span>a<span fgcolor="#"> </span><span fgcolor="#">&amp;</span><span fgcolor="#"> </span>b;<span fgcolor="#">
+ </span>}<span fgcolor="#">
+ </span><span fgcolor="#"><b>if</b></span><span fgcolor="#"> </span>(a<span fgcolor="#"> </span><span fgcolor="#">&lt;</span><span fgcolor="#"> </span>b)<span fgcolor="#"> </span>{<span fgcolor="#">
+ </span><span fgcolor="#"><b>return</b></span><span fgcolor="#"> </span><span fgcolor="#"><b>true</b></span>;<span fgcolor="#">
+ </span>}<span fgcolor="#">
+ </span>console.log(<span fgcolor="#">"single quote ' and double quote \""</span>)<span fgcolor="#">
+ </span>console.log(<span fgcolor="#">'single quote \' and double quote "'</span>)<span fgcolor="#">
+ </span><span fgcolor="#"><i>// comment with äöü ç</i></span><span fgcolor="#">
+</span>}<span fgcolor="#">
+</span></tt>"""
+
+def test_correct_output():
+ markup = highlight(INPUT, JavascriptLexer(), PangoMarkupFormatter())
+ assert OUTPUT == re.sub('<span fgcolor="#[^"]{6}">', '<span fgcolor="#">', markup)
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
new file mode 100644
index 0000000..21bf749
--- /dev/null
+++ b/tests/test_perllexer.py
@@ -0,0 +1,190 @@
+"""
+ Pygments regex lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import time
+
+import pytest
+
+from pygments.token import Keyword, Name, String, Text
+from pygments.lexers.perl import PerlLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield PerlLexer()
+
+
+# Test runaway regexes.
+# A previous version of the Perl lexer would spend a great deal of
+# time backtracking when given particular strings. These tests show that
+# the runaway backtracking doesn't happen any more (at least for the given
+# cases).
+
+
+# Test helpers.
+
+def assert_single_token(lexer, s, token):
+ """Show that a given string generates only one token."""
+ tokens = list(lexer.get_tokens_unprocessed(s))
+ assert len(tokens) == 1
+ assert s == tokens[0][2]
+ assert token == tokens[0][1]
+
+
+def assert_tokens(lexer, strings, expected_tokens):
+ """Show that a given string generates the expected tokens."""
+ tokens = list(lexer.get_tokens_unprocessed(''.join(strings)))
+ parsed_strings = [t[2] for t in tokens]
+ assert parsed_strings == strings
+ parsed_tokens = [t[1] for t in tokens]
+ assert parsed_tokens == expected_tokens
+
+
+def assert_fast_tokenization(lexer, s):
+ """Show that a given string is tokenized quickly."""
+ start = time.time()
+ tokens = list(lexer.get_tokens_unprocessed(s))
+ end = time.time()
+ # Isn't 10 seconds kind of a long time? Yes, but we don't want false
+ # positives when the tests are starved for CPU time.
+ if end-start > 10:
+ pytest.fail('tokenization took too long')
+ return tokens
+
+
+# Strings.
+
+def test_single_quote_strings(lexer):
+ assert_single_token(lexer, r"'foo\tbar\\\'baz'", String)
+ assert_fast_tokenization(lexer, "'" + '\\'*999)
+
+
+def test_double_quote_strings(lexer):
+ assert_single_token(lexer, r'"foo\tbar\\\"baz"', String)
+ assert_fast_tokenization(lexer, '"' + '\\'*999)
+
+
+def test_backtick_strings(lexer):
+ assert_single_token(lexer, r'`foo\tbar\\\`baz`', String.Backtick)
+ assert_fast_tokenization(lexer, '`' + '\\'*999)
+
+
+# Regex matches with various delimiters.
+
+def test_match(lexer):
+ assert_single_token(lexer, r'/aa\tbb/', String.Regex)
+ assert_fast_tokenization(lexer, '/' + '\\'*999)
+
+
+def test_match_with_slash(lexer):
+ assert_tokens(lexer, ['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm/xxx\n' + '\\'*999)
+
+
+def test_match_with_bang(lexer):
+ assert_tokens(lexer, ['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm!' + '\\'*999)
+
+
+def test_match_with_brace(lexer):
+ assert_tokens(lexer, ['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm{' + '\\'*999)
+
+
+def test_match_with_angle_brackets(lexer):
+ assert_tokens(lexer, ['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm<' + '\\'*999)
+
+
+def test_match_with_parenthesis(lexer):
+ assert_tokens(lexer, ['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm(' + '\\'*999)
+
+
+def test_match_with_at_sign(lexer):
+ assert_tokens(lexer, ['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm@' + '\\'*999)
+
+
+def test_match_with_percent_sign(lexer):
+ assert_tokens(lexer, ['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm%' + '\\'*999)
+
+
+def test_match_with_dollar_sign(lexer):
+ assert_tokens(lexer, ['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
+ assert_fast_tokenization(lexer, 'm$' + '\\'*999)
+
+
+# Regex substitutions with various delimiters.
+
+def test_substitution_with_slash(lexer):
+ assert_single_token(lexer, 's/aaa/bbb/g', String.Regex)
+ assert_fast_tokenization(lexer, 's/foo/' + '\\'*999)
+
+
+def test_substitution_with_at_sign(lexer):
+ assert_single_token(lexer, r's@aaa@bbb@g', String.Regex)
+ assert_fast_tokenization(lexer, 's@foo@' + '\\'*999)
+
+
+def test_substitution_with_percent_sign(lexer):
+ assert_single_token(lexer, r's%aaa%bbb%g', String.Regex)
+ assert_fast_tokenization(lexer, 's%foo%' + '\\'*999)
+
+
+def test_substitution_with_brace(lexer):
+ assert_single_token(lexer, r's{aaa}', String.Regex)
+ assert_fast_tokenization(lexer, 's{' + '\\'*999)
+
+
+def test_substitution_with_angle_bracket(lexer):
+ assert_single_token(lexer, r's<aaa>', String.Regex)
+ assert_fast_tokenization(lexer, 's<' + '\\'*999)
+
+
+def test_substitution_with_square_bracket(lexer):
+ assert_single_token(lexer, r's[aaa]', String.Regex)
+ assert_fast_tokenization(lexer, 's[' + '\\'*999)
+
+
+def test_substitution_with_parenthesis(lexer):
+ assert_single_token(lexer, r's(aaa)', String.Regex)
+ assert_fast_tokenization(lexer, 's(' + '\\'*999)
+
+
+# Namespaces/modules
+
+def test_package_statement(lexer):
+ assert_tokens(lexer, ['package', ' ', 'Foo'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+ assert_tokens(lexer, ['package', ' ', 'Foo::Bar'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+
+
+def test_use_statement(lexer):
+ assert_tokens(lexer, ['use', ' ', 'Foo'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+ assert_tokens(lexer, ['use', ' ', 'Foo::Bar'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+
+
+def test_no_statement(lexer):
+ assert_tokens(lexer, ['no', ' ', 'Foo'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+ assert_tokens(lexer, ['no', ' ', 'Foo::Bar'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+
+
+def test_require_statement(lexer):
+ assert_tokens(lexer, ['require', ' ', 'Foo'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+ assert_tokens(lexer, ['require', ' ', 'Foo::Bar'],
+ [Keyword, Text.Whitespace, Name.Namespace])
+ assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'],
+ [Keyword, Text.Whitespace, String])
diff --git a/tests/test_procfile.py b/tests/test_procfile.py
new file mode 100644
index 0000000..d919771
--- /dev/null
+++ b/tests/test_procfile.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+"""
+ Basic ProcfileLexer Test
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.token import Name, Punctuation, Text
+from pygments.lexers.procfile import ProcfileLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield ProcfileLexer()
+
+
+def test_basic_line(lexer):
+ text = 'task: executable --options'
+
+ tokens = lexer.get_tokens(text)
+
+ for index, token in enumerate(tokens):
+ if index == 0:
+ assert token == (Name.Label, 'task')
+ elif index == 1:
+ assert token == (Punctuation, ':')
+ else:
+ assert token[0] in (Text, Text.Whitespace)
+
+
+def test_environment_variable(lexer):
+ text = '$XDG_SESSION_PATH'
+
+ token = list(lexer.get_tokens(text))[0]
+
+ assert token == (Name.Variable, text)
diff --git a/tests/test_raw_token.py b/tests/test_raw_token.py
new file mode 100644
index 0000000..bae5a49
--- /dev/null
+++ b/tests/test_raw_token.py
@@ -0,0 +1,68 @@
+import bz2
+import gzip
+
+from pygments import highlight
+from pygments.formatters import HtmlFormatter, RawTokenFormatter
+from pygments.lexers import PythonLexer, RawTokenLexer
+
+
+def test_raw_token():
+ code = "2 + α"
+ raw = highlight(code, PythonLexer(), RawTokenFormatter())
+ html = highlight(code, PythonLexer(), HtmlFormatter())
+
+ assert highlight(raw, RawTokenLexer(), RawTokenFormatter()) == raw
+ assert highlight(raw, RawTokenLexer(), HtmlFormatter()) == html
+ assert highlight(raw.decode(), RawTokenLexer(), HtmlFormatter()) == html
+
+ raw_gz = highlight(code, PythonLexer(), RawTokenFormatter(compress="gz"))
+ assert gzip.decompress(raw_gz) == raw
+ assert highlight(raw_gz, RawTokenLexer(compress="gz"), RawTokenFormatter()) == raw
+ assert (
+ highlight(
+ raw_gz.decode("latin1"), RawTokenLexer(compress="gz"), RawTokenFormatter()
+ )
+ == raw
+ )
+
+ raw_bz2 = highlight(code, PythonLexer(), RawTokenFormatter(compress="bz2"))
+ assert bz2.decompress(raw_bz2) == raw
+ assert highlight(raw_bz2, RawTokenLexer(compress="bz2"), RawTokenFormatter()) == raw
+ assert (
+ highlight(
+ raw_bz2.decode("latin1"), RawTokenLexer(compress="bz2"), RawTokenFormatter()
+ )
+ == raw
+ )
+
+
+def test_invalid_raw_token():
+ # These should not throw exceptions.
+ assert (
+ highlight("Tolkien", RawTokenLexer(), RawTokenFormatter())
+ == b"Token.Error\t'Tolkien\\n'\n"
+ )
+ assert (
+ highlight("Tolkien\t'x'", RawTokenLexer(), RawTokenFormatter())
+ == b"Token\t'x'\n"
+ )
+ assert (
+ highlight("Token.Text\t42", RawTokenLexer(), RawTokenFormatter())
+ == b"Token.Error\t'Token.Text\\t42\\n'\n"
+ )
+ assert (
+ highlight("Token.Text\t'", RawTokenLexer(), RawTokenFormatter())
+ == b'Token.Error\t"Token.Text\\t\'\\n"\n'
+ )
+ assert (
+ highlight("Token.Text\t'α'", RawTokenLexer(), RawTokenFormatter())
+ == b"Token.Text\t'\\u03b1'\n"
+ )
+ assert (
+ highlight("Token.Text\tu'α'", RawTokenLexer(), RawTokenFormatter())
+ == b"Token.Text\t'\\u03b1'\n"
+ )
+ assert (
+ highlight(b"Token.Text\t'\xff'", RawTokenLexer(), RawTokenFormatter())
+ == b"Token.Text\t'\\xff'\n"
+ )
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
new file mode 100644
index 0000000..1b9639f
--- /dev/null
+++ b/tests/test_regexlexer.py
@@ -0,0 +1,65 @@
+"""
+ Pygments regex lexer tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.token import Text, Whitespace
+from pygments.lexer import RegexLexer, default
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield MyLexer()
+
+
+class MyLexer(RegexLexer):
+ """Test tuple state transitions including #pop."""
+ tokens = {
+ 'root': [
+ ('a', Text.Root, 'rag'),
+ ('e', Text.Root),
+ ('#', Text.Root, '#pop'),
+ ('@', Text.Root, ('#pop', '#pop')),
+ default(('beer', 'beer'))
+ ],
+ 'beer': [
+ ('d', Text.Beer, ('#pop', '#pop')),
+ ],
+ 'rag': [
+ ('b', Text.Rag, '#push'),
+ ('c', Text.Rag, ('#pop', 'beer')),
+ ],
+ }
+
+
+def test_tuple(lexer):
+ toks = list(lexer.get_tokens_unprocessed('abcde'))
+ assert toks == [
+ (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
+ (3, Text.Beer, 'd'), (4, Text.Root, 'e')]
+
+
+def test_multiline(lexer):
+ toks = list(lexer.get_tokens_unprocessed('a\ne'))
+ assert toks == [
+ (0, Text.Root, 'a'), (1, Whitespace, '\n'), (2, Text.Root, 'e')]
+
+
+def test_default(lexer):
+ toks = list(lexer.get_tokens_unprocessed('d'))
+ assert toks == [(0, Text.Beer, 'd')]
+
+
+def test_pop_empty_regular(lexer):
+ toks = list(lexer.get_tokens_unprocessed('#e'))
+ assert toks == [(0, Text.Root, '#'), (1, Text.Root, 'e')]
+
+
+def test_pop_empty_tuple(lexer):
+ toks = list(lexer.get_tokens_unprocessed('@e'))
+ assert toks == [(0, Text.Root, '@'), (1, Text.Root, 'e')]
diff --git a/tests/test_regexopt.py b/tests/test_regexopt.py
new file mode 100644
index 0000000..2116467
--- /dev/null
+++ b/tests/test_regexopt.py
@@ -0,0 +1,102 @@
+"""
+ Tests for pygments.regexopt
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import random
+from itertools import combinations_with_replacement
+
+from pygments.regexopt import regex_opt
+
+ALPHABET = ['a', 'b', 'c', 'd', 'e']
+
+N_TRIES = 15
+
+
+def generate_keywordlist(length):
+ return [''.join(p) for p in
+ combinations_with_replacement(ALPHABET, length)]
+
+
+def test_randomly():
+ # generate a list of all possible keywords of a certain length using
+ # a restricted alphabet, then choose some to match and make sure only
+ # those do
+ for n in range(3, N_TRIES):
+ kwlist = generate_keywordlist(n)
+ to_match = random.sample(kwlist,
+ random.randint(1, len(kwlist) - 1))
+ no_match = set(kwlist) - set(to_match)
+ rex = re.compile(regex_opt(to_match))
+ assert rex.groups == 1
+ for w in to_match:
+ assert rex.match(w)
+ for w in no_match:
+ assert not rex.match(w)
+
+
+def test_prefix():
+ opt = regex_opt(('a', 'b'), prefix=r':{1,2}')
+ print(opt)
+ rex = re.compile(opt)
+ assert not rex.match('a')
+ assert rex.match('::a')
+ assert not rex.match(':::') # fullmatch
+
+
+def test_suffix():
+ opt = regex_opt(('a', 'b'), suffix=r':{1,2}')
+ print(opt)
+ rex = re.compile(opt)
+ assert not rex.match('a')
+ assert rex.match('a::')
+ assert not rex.match(':::') # fullmatch
+
+
+def test_suffix_opt():
+ # test that detected suffixes remain sorted.
+ opt = regex_opt(('afoo', 'abfoo'))
+ print(opt)
+ rex = re.compile(opt)
+ m = rex.match('abfoo')
+ assert m.end() == 5
+
+
+def test_different_length_grouping():
+ opt = regex_opt(('a', 'xyz'))
+ print(opt)
+ rex = re.compile(opt)
+ assert rex.match('a')
+ assert rex.match('xyz')
+ assert not rex.match('b')
+ assert rex.groups == 1
+
+
+def test_same_length_grouping():
+ opt = regex_opt(('a', 'b'))
+ print(opt)
+ rex = re.compile(opt)
+ assert rex.match('a')
+ assert rex.match('b')
+ assert not rex.match('x')
+
+ assert rex.groups == 1
+ groups = rex.match('a').groups()
+ assert groups == ('a',)
+
+
+def test_same_length_suffix_grouping():
+ opt = regex_opt(('a', 'b'), suffix='(m)')
+ print(opt)
+ rex = re.compile(opt)
+ assert rex.match('am')
+ assert rex.match('bm')
+ assert not rex.match('xm')
+ assert not rex.match('ax')
+ assert rex.groups == 2
+ groups = rex.match('am').groups()
+ assert groups == ('a', 'm')
diff --git a/tests/test_robotframework_lexer.py b/tests/test_robotframework_lexer.py
new file mode 100644
index 0000000..807fbc4
--- /dev/null
+++ b/tests/test_robotframework_lexer.py
@@ -0,0 +1,38 @@
+"""
+ Pygments Robot Framework lexer tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.robotframework import RobotFrameworkLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield RobotFrameworkLexer()
+
+
+def assert_same_text(lexer, text):
+ """Show that lexed text does not remove any content. """
+ tokens = list(lexer.get_tokens_unprocessed(text))
+ output = ''.join(t[2] for t in tokens)
+ assert text == output
+
+
+def test_empty_brackets_after_scalar_variable(lexer):
+ assert_same_text(lexer, '*** Variables ***\n'
+ '${test}[]\n')
+
+
+def test_empty_brackets_after_list_variable(lexer):
+ assert_same_text(lexer, '*** Variables ***\n'
+ '@{test}[]\n')
+
+
+def test_empty_brackets_after_dict_variable(lexer):
+ assert_same_text(lexer, '*** Variables ***\n'
+ '&{test}[]\n')
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
new file mode 100644
index 0000000..1f3ee6e
--- /dev/null
+++ b/tests/test_rtf_formatter.py
@@ -0,0 +1,107 @@
+"""
+ Pygments RTF formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from io import StringIO
+
+from pygments.formatters import RtfFormatter
+from pygments.lexers.special import TextLexer
+
+
+foot = (r'\par' '\n' r'}')
+
+
+def _escape(string):
+ return string.replace("\n", r"\n")
+
+
+def _build_message(*args, **kwargs):
+ string = kwargs.get('string', None)
+ t = _escape(kwargs.get('t', ''))
+ expected = _escape(kwargs.get('expected', ''))
+ result = _escape(kwargs.get('result', ''))
+
+ if string is None:
+ string = ("The expected output of '{t}'\n"
+ "\t\tShould be '{expected}'\n"
+ "\t\tActually outputs '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)")
+
+ end = -len(_escape(foot))
+ start = end - len(expected)
+
+ return string.format(t=t,
+ result = result[start:end],
+ expected = expected)
+
+
+def format_rtf(t):
+ tokensource = list(TextLexer().get_tokens(t))
+ fmt = RtfFormatter()
+ buf = StringIO()
+ fmt.format(tokensource, buf)
+ result = buf.getvalue()
+ buf.close()
+ return result
+
+
+def test_rtf_header():
+ t = ''
+ result = format_rtf(t)
+ expected = r'{\rtf1\ansi\uc0'
+ msg = ("RTF documents are expected to start with '{expected}'\n"
+          "\t\tStarts instead with '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)".format(
+ expected=expected,
+ result=result[:len(expected)]))
+ assert result.startswith(expected), msg
+
+
+def test_rtf_footer():
+ t = ''
+ result = format_rtf(t)
+ expected = ''
+ msg = ("RTF documents are expected to end with '{expected}'\n"
+          "\t\tEnds instead with '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)".format(
+ expected=_escape(expected),
+ result=_escape(result[-len(expected):])))
+ assert result.endswith(expected+foot), msg
+
+
+def test_ascii_characters():
+ t = 'a b c d ~'
+ result = format_rtf(t)
+ expected = (r'a b c d ~')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected+foot), msg
+
+
+def test_escape_characters():
+ t = '\\ {{'
+ result = format_rtf(t)
+ expected = r'\\ \{\{'
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected+foot), msg
+
+
+def test_single_characters():
+ t = 'â € ¤ каждой'
+ result = format_rtf(t)
+ expected = (r'{\u226} {\u8364} {\u164} '
+ r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected+foot), msg
+
+
+def test_double_characters():
+ t = 'က 힣 ↕ ↕︎ 鼖'
+ result = format_rtf(t)
+ expected = (r'{\u4096} {\u55203} {\u8597} '
+ r'{\u8597}{\u65038} {\u55422}{\u56859}')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected+foot), msg
diff --git a/tests/test_ruby.py b/tests/test_ruby.py
new file mode 100644
index 0000000..a272926
--- /dev/null
+++ b/tests/test_ruby.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""
+ Basic RubyLexer Test
+ ~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.token import Name
+from pygments.lexers.ruby import RubyLexer
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield RubyLexer()
+
+
+@pytest.mark.parametrize(
+ 'method_name',
+ (
+ # Bare, un-scoped method names
+ 'a', 'A', 'z', 'Z', 'は', '\u0080', '\uffff',
+ 'aは0_', 'はA__9', '\u0080はa0_', '\uffff__99Z',
+
+ # Method names with trailing characters
+ 'aは!', 'はz?', 'はa=',
+
+ # Scoped method names
+ 'self.a', 'String.は_', 'example.AZ09_!',
+
+ # Operator overrides
+ '+', '+@', '-', '-@', '!', '!@', '~', '~@',
+ '*', '**', '/', '%', '&', '^', '`',
+ '<=>', '<', '<<', '<=', '>', '>>', '>=',
+ '==', '!=', '===', '=~', '!~',
+ '[]', '[]=',
+ )
+)
+def test_positive_method_names(lexer, method_name):
+ """Validate positive method name parsing."""
+
+ text = 'def ' + method_name
+ assert list(lexer.get_tokens(text))[-2] == (Name.Function, method_name.rpartition('.')[2])
+
+
+@pytest.mark.parametrize('method_name', ('1', '_', '<>', '<<=', '>>=', '&&', '||', '==?', '==!', '===='))
+def test_negative_method_names(lexer, method_name):
+ """Validate negative method name parsing."""
+
+ text = 'def ' + method_name
+ assert list(lexer.get_tokens(text))[-2] != (Name.Function, method_name)
diff --git a/tests/test_sql.py b/tests/test_sql.py
new file mode 100644
index 0000000..e0a20f9
--- /dev/null
+++ b/tests/test_sql.py
@@ -0,0 +1,115 @@
+"""
+ Pygments SQL lexers tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.sql import name_between_bracket_re, \
+ name_between_backtick_re, tsql_go_re, tsql_declare_re, \
+ tsql_variable_re, MySqlLexer, TransactSqlLexer
+
+from pygments.token import Comment, Name, Number, Punctuation, Whitespace
+
+
+@pytest.fixture(scope='module')
+def lexer():
+ yield TransactSqlLexer()
+
+
+def _assert_are_tokens_of_type(lexer, examples, expected_token_type):
+ for test_number, example in enumerate(examples.split(), 1):
+ token_count = 0
+ for token_type, token_value in lexer.get_tokens(example):
+ if token_type != Whitespace:
+ token_count += 1
+            assert token_type == expected_token_type, \
+                'token_type #%d for %s is %s but must be %s' % \
+                (test_number, token_value, token_type, expected_token_type)
+ assert token_count == 1, \
+ '%s must yield exactly 1 token instead of %d' % \
+ (example, token_count)
+
+
+def _assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
+ actual_tokens = tuple(lexer.get_tokens(text))
+ if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
+ actual_tokens = tuple(actual_tokens[:-1])
+ assert expected_tokens_without_trailing_newline == actual_tokens, \
+ 'text must yield expected tokens: %s' % text
+
+
+def test_can_lex_float(lexer):
+ _assert_are_tokens_of_type(lexer,
+ '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2',
+ Number.Float)
+ _assert_tokens_match(lexer,
+ '1e2.1e2',
+ ((Number.Float, '1e2'), (Number.Float, '.1e2')))
+
+
+def test_can_reject_almost_float(lexer):
+ _assert_tokens_match(lexer, '.e1', ((Punctuation, '.'), (Name, 'e1')))
+
+
+def test_can_lex_integer(lexer):
+ _assert_are_tokens_of_type(lexer, '1 23 456', Number.Integer)
+
+
+def test_can_lex_names(lexer):
+ _assert_are_tokens_of_type(
+ lexer,
+ 'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
+ Name)
+
+
+def test_can_lex_comments(lexer):
+ _assert_tokens_match(lexer, '--\n', ((Comment.Single, '--\n'),))
+ _assert_tokens_match(lexer, '/**/', (
+ (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
+ ))
+ _assert_tokens_match(lexer, '/*/**/*/', (
+ (Comment.Multiline, '/*'),
+ (Comment.Multiline, '/*'),
+ (Comment.Multiline, '*/'),
+ (Comment.Multiline, '*/'),
+ ))
+
+
+def test_can_match_analyze_text_res():
+ assert ['`a`', '`bc`'] == \
+ name_between_backtick_re.findall('select `a`, `bc` from some')
+ assert ['[a]', '[bc]'] == \
+ name_between_bracket_re.findall('select [a], [bc] from some')
+ assert tsql_declare_re.search('--\nDeClaRe @some int;')
+ assert tsql_go_re.search('select 1\ngo\n--')
+ assert tsql_variable_re.search('create procedure dbo.usp_x @a int, @b int')
+
+
+def test_can_analyze_text():
+ mysql_lexer = MySqlLexer()
+ tsql_lexer = TransactSqlLexer()
+ code_to_expected_lexer_map = {
+ 'select `a`, `bc` from some': mysql_lexer,
+ 'select [a], [bc] from some': tsql_lexer,
+ '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
+ '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
+ }
+ sql_lexers = set(code_to_expected_lexer_map.values())
+ for code, expected_lexer in code_to_expected_lexer_map.items():
+ ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
+ best_rating, best_lexer_name = sorted(ratings_and_lexers, reverse=True)[0]
+ expected_rating = expected_lexer.analyse_text(code)
+ message = (
+ 'lexer must be %s (rating %.2f) instead of '
+ '%s (rating %.2f) for analyse_text() on code:\n%s') % (
+ expected_lexer.name,
+ expected_rating,
+ best_lexer_name,
+ best_rating,
+ code
+ )
+ assert expected_lexer.name == best_lexer_name, message
diff --git a/tests/test_templates.py b/tests/test_templates.py
new file mode 100644
index 0000000..9ed816f
--- /dev/null
+++ b/tests/test_templates.py
@@ -0,0 +1,130 @@
+import pytest
+
+from pygments.lexers.templates import JavascriptDjangoLexer, MasonLexer, \
+ SqlJinjaLexer, VelocityLexer
+
+from pygments.token import Comment
+
+
+@pytest.fixture(scope="module")
+def lexer():
+ yield JavascriptDjangoLexer()
+
+@pytest.fixture(scope='module')
+def lexerMason():
+ yield MasonLexer()
+
+@pytest.fixture(scope='module')
+def lexerVelocity():
+ yield VelocityLexer()
+
+@pytest.fixture(scope='module')
+def lexerSqlJinja():
+ yield SqlJinjaLexer()
+
+def test_do_not_mistake_JSDoc_for_django_comment(lexer):
+ """
+ Test to make sure the lexer doesn't mistake
+ {* ... *} to be a django comment
+ """
+ text = """/**
+ * @param {*} cool
+ */
+ func = function(cool) {
+ };
+
+ /**
+ * @param {*} stuff
+ */
+ fun = function(stuff) {
+ };"""
+ tokens = lexer.get_tokens(text)
+ assert not any(t[0] == Comment for t in tokens)
+
+def test_mason_unnamed_block(lexerMason):
+ text = """
+ <%class>
+ has 'foo';
+ has 'bar' => (required => 1);
+ has 'baz' => (isa => 'Int', default => 17);
+ </%class>
+ """
+ res = lexerMason.analyse_text(text)
+ assert res == 1.0
+
+def test_velocity_macro(lexerVelocity):
+ text = """
+ #macro(getBookListLink, $readingTrackerResult)
+ $readingTrackerResult.getBookListLink()
+ #end
+ """
+ res = lexerVelocity.analyse_text(text)
+ assert res == 0.26
+
+def test_velocity_foreach(lexerVelocity):
+ text = """
+ <ul>
+ #foreach( $product in $allProducts )
+ <li>$product</li>
+ #end
+ </ul>
+ """
+ res = lexerVelocity.analyse_text(text)
+ assert res == 0.16
+
+def test_velocity_if(lexerVelocity):
+ text = """
+ #if( $display )
+ <strong>Velocity!</strong>
+ #end
+ """
+ res = lexerVelocity.analyse_text(text)
+ assert res == 0.16
+
+def test_velocity_reference(lexerVelocity):
+ text = """
+ Hello $name! Welcome to Velocity!
+ """
+ res = lexerVelocity.analyse_text(text)
+ assert res == 0.01
+
+def test_sql_jinja_dbt_ref(lexerSqlJinja):
+ text = """
+ {%- set payment_methods = ["bank_transfer", "credit_card", "gift_card"] -%}
+
+ select
+ order_id,
+ {%- for payment_method in payment_methods %}
+ sum(case when payment_method = '{{payment_method}}' then amount end) as {{payment_method}}_amount
+ {%- if not loop.last %},{% endif -%}
+ {% endfor %}
+ from {{ ref('raw_payments') }}
+ group by 1
+ """
+ res = lexerSqlJinja.analyse_text(text)
+ assert res == 0.4
+
+def test_sql_jinja_dbt_source(lexerSqlJinja):
+ text = """
+ {%- set payment_methods = ["bank_transfer", "credit_card", "gift_card"] -%}
+
+ select
+ order_id,
+ {%- for payment_method in payment_methods %}
+ sum(case when payment_method = '{{payment_method}}' then amount end) as {{payment_method}}_amount
+ {%- if not loop.last %},{% endif -%}
+ {% endfor %}
+ from {{ source('payments_db', 'payments') }}
+ group by 1
+ """
+ res = lexerSqlJinja.analyse_text(text)
+ assert res == 0.25
+
+def test_sql_jinja_dbt_macro(lexerSqlJinja):
+ text = """
+ {% macro cents_to_dollars(column_name, precision=2) %}
+ ({{ column_name }} / 100)::numeric(16, {{ precision }})
+ {% endmacro %}
+ """
+ res = lexerSqlJinja.analyse_text(text)
+ assert res == 0.15
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
new file mode 100644
index 0000000..c1a7ec7
--- /dev/null
+++ b/tests/test_terminal_formatter.py
@@ -0,0 +1,100 @@
+"""
+ Pygments terminal formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from io import StringIO
+
+from pygments.lexers.sql import PlPgsqlLexer
+from pygments.formatters import TerminalFormatter, Terminal256Formatter, \
+ HtmlFormatter, LatexFormatter
+
+from pygments.style import Style
+from pygments.token import Token
+from pygments.lexers import Python3Lexer
+from pygments import highlight
+
+DEMO_TEXT = '''\
+-- comment
+select
+* from bar;
+'''
+DEMO_LEXER = PlPgsqlLexer
+DEMO_TOKENS = list(DEMO_LEXER().get_tokens(DEMO_TEXT))
+
+ANSI_RE = re.compile(r'\x1b[\w\W]*?m')
+
+
+def strip_ansi(x):
+ return ANSI_RE.sub('', x)
+
+
+def test_reasonable_output():
+ out = StringIO()
+ TerminalFormatter().format(DEMO_TOKENS, out)
+ plain = strip_ansi(out.getvalue())
+ assert DEMO_TEXT.count('\n') == plain.count('\n')
+ print(repr(plain))
+
+ for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+ assert a == b
+
+
+def test_reasonable_output_lineno():
+ out = StringIO()
+ TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
+ plain = strip_ansi(out.getvalue())
+ assert DEMO_TEXT.count('\n') + 1 == plain.count('\n')
+ print(repr(plain))
+
+ for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+ assert a in b
+
+
+class MyStyle(Style):
+ styles = {
+ Token.Comment: 'ansibrightblack',
+ Token.String: 'ansibrightblue bg:ansired',
+ Token.Number: 'ansibrightgreen bg:ansigreen',
+ Token.Number.Hex: 'ansigreen bg:ansibrightred',
+ }
+
+
+CODE = '''
+# this should be a comment
+print("Hello World")
+async def function(a,b,c, *d, **kwarg:Bool)->Bool:
+ pass
+ return 123, 0xb3e3
+
+'''
+
+
+def test_style_html():
+ style = HtmlFormatter(style=MyStyle).get_style_defs()
+ assert '#555555' in style, "ansigray for comment not html css style"
+
+
+def test_others_work():
+ """Check other formatters don't crash."""
+ highlight(CODE, Python3Lexer(), LatexFormatter(style=MyStyle))
+ highlight(CODE, Python3Lexer(), HtmlFormatter(style=MyStyle))
+
+
+def test_256esc_seq():
+ """
+ Test that a few escape sequences are actually used when using ansi<> color
+ codes.
+ """
+ def termtest(x):
+ return highlight(x, Python3Lexer(),
+ Terminal256Formatter(style=MyStyle))
+
+ assert '32;101' in termtest('0x123')
+ assert '92;42' in termtest('123')
+ assert '90' in termtest('#comment')
+ assert '94;41' in termtest('"String"')
diff --git a/tests/test_thingsdb.py b/tests/test_thingsdb.py
new file mode 100644
index 0000000..25e3816
--- /dev/null
+++ b/tests/test_thingsdb.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+"""
+ Basic ThingsDB Test
+ ~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.token import Number, Text, Comment
+from pygments.lexers import ThingsDBLexer
+
+
+class ThingsDBTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = ThingsDBLexer()
+ self.maxDiff = None
+
+ def testNumber(self):
+ fragment = u'42'
+ tokens = [
+ (Number.Integer, u'42'),
+ (Text.Whitespace, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testThingId(self):
+ fragment = u'#42'
+ tokens = [
+ (Comment.Preproc, u'#42'),
+ (Text.Whitespace, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_tnt.py b/tests/test_tnt.py
new file mode 100644
index 0000000..c2282ac
--- /dev/null
+++ b/tests/test_tnt.py
@@ -0,0 +1,226 @@
+"""
+    Typographic Number Theory tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.tnt import TNTLexer
+from pygments.token import Text, Operator, Keyword, Name, Number, \
+ Punctuation, Error
+
+
+@pytest.fixture(autouse=True)
+def lexer():
+ yield TNTLexer()
+
+
+# whitespace
+
+@pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
+def test_whitespace_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as whitespace text."""
+ assert lexer.whitespace(0, text) == len(text) - 1
+ assert lexer.whitespace(0, text, True) == len(text) - 1
+ assert lexer.cur[-1] == (0, Text, text[:-1])
+
+
+@pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
+def test_whitespace_negative_matches(lexer, text):
+ """Test statements that do not start with whitespace text."""
+ assert lexer.whitespace(0, text) == 0
+ with pytest.raises(AssertionError):
+ lexer.whitespace(0, text, True)
+ assert not lexer.cur
+
+
+# terms that can go on either side of an = sign
+
+@pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
+def test_variable_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as variables."""
+ assert lexer.variable(0, text) == len(text) - 1
+ assert lexer.cur[-1] == (0, Name.Variable, text[:-1])
+
+
+@pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
+def test_variable_negative_matches(lexer, text):
+ """Test fragments that should **not** be tokenized as variables."""
+ with pytest.raises(AssertionError):
+ lexer.variable(0, text)
+ assert not lexer.cur
+
+
+@pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
+def test_numeral_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as (unary) numerals."""
+ assert lexer.term(0, text) == len(text)
+ assert lexer.cur[-1] == (len(text) - 1, Number.Integer, text[-1])
+ if text != '0':
+ assert lexer.cur[-2] == (0, Number.Integer, text[:-1])
+
+
+@pytest.mark.parametrize('text', (
+ '(a+b)', '(b.a)', '(c+d)'
+))
+def test_multiterm_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as a compound term."""
+ assert lexer.term(0, text) == len(text)
+ assert [t[1] for t in lexer.cur] == [
+ Punctuation, Name.Variable, Operator,
+ Name.Variable, Punctuation
+ ]
+
+
+@pytest.mark.parametrize('text', ('1', '=', 'A'))
+def test_term_negative_matches(lexer, text):
+ """Test fragments that should not be tokenized as terms at all."""
+ with pytest.raises(AssertionError):
+ lexer.term(0, text)
+ assert not lexer.cur
+
+
+# full statements, minus rule
+
+@pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
+def test_negator_positive_matches(lexer, text):
+ """Test statements that start with a negation."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0] == (0, Operator, text[:-4])
+
+
+@pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
+def test_quantifier_positive_matches(lexer, text):
+ """Test statements that start with a quantifier."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0][1] == Keyword.Declaration
+ assert lexer.cur[1][1] == Name.Variable
+ assert lexer.cur[2] == (2, Punctuation, ':')
+
+
+@pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
+def test_quantifier_negative_matches(lexer, text):
+ """Test quantifiers that are only partially valid."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+ # leftovers should still be valid
+ assert lexer.cur[0][1] == Keyword.Declaration
+ assert lexer.cur[1][1] == Name.Variable
+
+
+@pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
+def test_compound_positive_matches(lexer, text):
+ """Test statements that consist of multiple formulas compounded."""
+ assert lexer.formula(0, text) == len(text)
+ assert lexer.cur[0] == (0, Punctuation, '<')
+ assert lexer.cur[4][1] == Operator
+ assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')
+
+
+@pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
+def test_compound_negative_matches(lexer, text):
+ """Test statements that look like compounds but are invalid."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+ assert lexer.cur[0] == (0, Punctuation, '<')
+
+
+@pytest.mark.parametrize('text', ('a=b ', 'a=0 ', '0=b '))
+def test_formula_postive_matches(lexer, text):
+ """Test the normal singular formula."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0][2] == text[0]
+ assert lexer.cur[1] == (1, Operator, '=')
+ assert lexer.cur[2][2] == text[2]
+
+
+@pytest.mark.parametrize('text', ('a/b', '0+0 '))
+def test_formula_negative_matches(lexer, text):
+ """Test anything but an equals sign."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+
+
+# rules themselves
+
+@pytest.mark.parametrize('text', (
+ 'fantasy rule', 'carry over line 5', 'premise', 'joining',
+ 'double-tilde', 'switcheroo', 'De Morgan', 'specification'
+))
+def test_rule_positive_matches(lexer, text):
+ """Test some valid rules of TNT."""
+ assert lexer.rule(0, text) == len(text)
+ assert lexer.cur[0][:2] == (0, Keyword)
+ if text[-1].isdigit():
+ assert lexer.cur[1][1] == Number.Integer
+
+
+@pytest.mark.parametrize('text', (
+ 'fantasy', 'carry over', 'premse', 'unjoining',
+ 'triple-tilde', 'switcheru', 'De-Morgan', 'despecification'
+))
+def test_rule_negative_matches(lexer, text):
+ """Test some invalid rules of TNT."""
+ with pytest.raises(AssertionError):
+ lexer.rule(0, text)
+
+
+# referrals
+
+@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)',
+ '(lines 1, 6 and 0)'))
+def test_lineno_positive_matches(lexer, text):
+ """Test line referrals."""
+ assert lexer.lineno(0, text) == len(text)
+ assert lexer.cur[0] == (0, Punctuation, '(')
+ assert lexer.cur[1][:2] == (1, Text)
+ assert lexer.cur[2][1] == Number.Integer
+ assert lexer.cur[3] == (len(text)-1, Punctuation, ')')
+
+
+@pytest.mark.parametrize('text', (
+ '(lines one, two, and four)1 ', # to avoid IndexError
+ '(lines 1 2 and 3)', '(lines 1 2 3)'
+))
+def test_lineno_negative_matches(lexer, text):
+ """Test invalid line referrals."""
+ with pytest.raises(AssertionError):
+ lexer.lineno(0, text)
+
+
+# worst-case: error text
+
+@pytest.mark.parametrize('text', ('asdf', 'fdsa\nasdf', 'asdf\n '))
+def test_error_till_line_end(lexer, text):
+ try:
+ nl = text.index('\n')
+ except ValueError:
+ nl = len(text)
+ try:
+ end = text.find(text.split(None, 2)[1])
+ except IndexError: # split failed
+ end = len(text)
+ assert lexer.error_till_line_end(0, text) == end
+ assert lexer.cur[0] == (0, Error, text[:nl])
+
+
+# full statement, including rule (because this can't be tested any other way)
+
+@pytest.mark.parametrize('text', ('[ push', '] pop'))
+def test_fantasy_positive_matches(lexer, text):
+ """Test statements that should be tokenized as push/pop statements."""
+ assert lexer.get_tokens_unprocessed(text)[0] == (0, Keyword, text[0])
+
+
+# full text is already done by examplefiles, but here's some exceptions
+
+@pytest.mark.parametrize('text', (
+ '0', 'a=b', 'premise',
+ '0 a=b premise', '1 b=a symmetry (line 0)'
+))
+def test_no_crashing(lexer, text):
+ """Test incomplete text fragments that shouldn't crash the whole lexer."""
+ assert lexer.get_tokens(text)
diff --git a/tests/test_token.py b/tests/test_token.py
new file mode 100644
index 0000000..c155475
--- /dev/null
+++ b/tests/test_token.py
@@ -0,0 +1,51 @@
+"""
+ Test suite for the token module
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import copy
+
+import pytest
+
+from pygments import token
+
+
+def test_tokentype():
+ t = token.String
+ assert t.split() == [token.Token, token.Literal, token.String]
+ assert t.__class__ is token._TokenType
+
+
+def test_functions():
+ assert token.is_token_subtype(token.String, token.String)
+ assert token.is_token_subtype(token.String, token.Literal)
+ assert not token.is_token_subtype(token.Literal, token.String)
+
+ assert token.string_to_tokentype(token.String) is token.String
+ assert token.string_to_tokentype('') is token.Token
+ assert token.string_to_tokentype('String') is token.String
+
+
+def test_sanity_check():
+ stp = token.STANDARD_TYPES.copy()
+ stp[token.Token] = '---' # Token and Text do conflict, that is okay
+ t = {}
+ for k, v in stp.items():
+ t.setdefault(v, []).append(k)
+ if len(t) == len(stp):
+ return # Okay
+
+ for k, v in t.items():
+ if len(v) > 1:
+ pytest.fail("%r has more than one key: %r" % (k, v))
+
+
+def test_copying():
+ # Token instances are supposed to be singletons, so copying or even
+ # deepcopying should return themselves
+ t = token.String
+ assert t is copy.copy(t)
+ assert t is copy.deepcopy(t)
diff --git a/tests/test_unistring.py b/tests/test_unistring.py
new file mode 100644
index 0000000..65fb1fc
--- /dev/null
+++ b/tests/test_unistring.py
@@ -0,0 +1,45 @@
+"""
+ Test suite for the unistring module
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import random
+
+from pygments import unistring as uni
+
+
+def test_cats_exist_and_compilable():
+ for cat in uni.cats:
+ s = getattr(uni, cat)
+ if s == '': # Probably Cs on Jython
+ continue
+ print("%s %r" % (cat, s))
+ re.compile('[%s]' % s)
+
+
+def _cats_that_match(c):
+ matching_cats = []
+ for cat in uni.cats:
+ s = getattr(uni, cat)
+ if s == '': # Probably Cs on Jython
+ continue
+ if re.compile('[%s]' % s).match(c):
+ matching_cats.append(cat)
+ return matching_cats
+
+
+def test_spot_check_types():
+ # Each char should match one, and precisely one, category
+ random.seed(0)
+ for i in range(1000):
+ o = random.randint(0, 65535)
+ c = chr(o)
+ if o > 0xd800 and o <= 0xdfff and not uni.Cs:
+ continue # Bah, Jython.
+ print(hex(o))
+ cats = _cats_that_match(c)
+ assert len(cats) == 1, "%d (%s): %s" % (o, c, cats)
diff --git a/tests/test_usd.py b/tests/test_usd.py
new file mode 100755
index 0000000..5438983
--- /dev/null
+++ b/tests/test_usd.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+"""Test that syntax highlighting for USD files works correctly."""
+
+import textwrap
+import unittest
+
+from pygments.lexers import UsdLexer
+from pygments.token import Name, String, Whitespace
+
+
+class _Common(unittest.TestCase):
+ """A basic class that makes it easier to write unittests."""
+
+ def setUp(self):
+ """Create a fresh USD lexer class before each test runs."""
+ self.lexer = UsdLexer()
+
+ def _get(self, code):
+ """Tokenize the code into its unique parts.
+
+ :param code: The USD source code to split up.
+ :type code: str
+
+ :returns: The tokenized pieces.
+ :rtype: list[:class:`pygments.token._TokenType`]
+
+ """
+ return list(self.lexer.get_tokens(code))
+
+
+class Features(_Common):
+ """Test that different features of USD highlight as expected."""
+
+ def test_asset_path(self):
+ """Check that a regular file path highlights correctly."""
+ for path in [
+ "@./some/path/to/a/file/foo.usda@",
+ "@/some/path/to/a/file/foo.usda@",
+ "@some/path/to/a/file/foo.usda@",
+ r"@file://SPECI__Z-_ALIZED(syntax_here)?with_arbitrary#)(%*&)\characters.tar.gz@",
+ ]:
+ expected = [
+ (String.Interpol, path),
+ (Whitespace, "\n"),
+ ]
+
+ self.assertEqual(expected, self._get(path))
+
+ def test_target_absolute(self):
+ """Check that SdfPath syntax examples work correctly."""
+ for code in [
+ # Absolute paths
+ "</some/another_one/here>",
+ "</some/path/here.property_name>",
+ "</some/path/here>",
+ # Relative paths
+ "<../some/another_one/here>",
+ "<../some/path/here.property_name>",
+ "<../some/path/here>",
+ ]:
+ self.assertEqual(
+ [(Name.Namespace, code), (Whitespace, "\n")], self._get(code),
+ )
diff --git a/tests/test_using_api.py b/tests/test_using_api.py
new file mode 100644
index 0000000..7b0b030
--- /dev/null
+++ b/tests/test_using_api.py
@@ -0,0 +1,39 @@
+"""
+ Pygments tests for using()
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pytest import raises
+
+from pygments.lexer import using, bygroups, this, RegexLexer
+from pygments.token import String, Text, Keyword
+
+
+class MyLexer(RegexLexer):
+ tokens = {
+ 'root': [
+ (r'#.*',
+ using(this, state='invalid')),
+ (r'(")(.+?)(")',
+ bygroups(String, using(this, state='string'), String)),
+ (r'[^"]+', Text),
+ ],
+ 'string': [
+ (r'.+', Keyword),
+ ],
+ }
+
+
+def test_basic():
+ expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
+ (String, '"'), (Text, 'e\n')]
+ assert list(MyLexer().get_tokens('a"bcd"e')) == expected
+
+
+def test_error():
+ def gen():
+ return list(MyLexer().get_tokens('#a'))
+ assert raises(KeyError, gen)
diff --git a/tests/test_util.py b/tests/test_util.py
new file mode 100644
index 0000000..d140836
--- /dev/null
+++ b/tests/test_util.py
@@ -0,0 +1,189 @@
+"""
+ Test suite for the util module
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pytest import raises
+
+from pygments import util, console
+
+
+class FakeLexer:
+ def analyse(text):
+ return text
+ analyse = util.make_analysator(analyse)
+
+
+def test_getoptions():
+ assert util.get_bool_opt({}, 'a', True) is True
+ assert util.get_bool_opt({}, 'a', 1) is True
+ assert util.get_bool_opt({}, 'a', 'true') is True
+ assert util.get_bool_opt({}, 'a', 'no') is False
+ assert raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
+ assert raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
+
+ assert util.get_int_opt({}, 'a', 1) == 1
+ assert raises(util.OptionError, util.get_int_opt, {}, 'a', [])
+ assert raises(util.OptionError, util.get_int_opt, {}, 'a', 'bar')
+
+ assert util.get_list_opt({}, 'a', [1]) == [1]
+ assert util.get_list_opt({}, 'a', '1 2') == ['1', '2']
+ assert raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
+
+ assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'bar') == 'bar'
+ assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'Bar', True) == 'bar'
+ assert raises(util.OptionError, util.get_choice_opt, {}, 'a',
+ ['foo', 'bar'], 'baz')
+
+
+def test_docstring_headline():
+ def f1():
+ """
+ docstring headline
+
+ other text
+ """
+ def f2():
+ """
+ docstring
+ headline
+
+ other text
+ """
+ def f3():
+ pass
+
+ assert util.docstring_headline(f1) == 'docstring headline'
+ assert util.docstring_headline(f2) == 'docstring headline'
+ assert util.docstring_headline(f3) == ''
+
+
+def test_analysator_returns_float():
+ # If an analysator wrapped by make_analysator returns a floating point
+ # number, then that number will be returned by the wrapper.
+ assert FakeLexer.analyse('0.5') == 0.5
+
+
+def test_analysator_returns_boolean():
+ # If an analysator wrapped by make_analysator returns a boolean value,
+ # then the wrapper will return 1.0 if the boolean was True or 0.0 if
+ # it was False.
+ assert FakeLexer.analyse(True) == 1.0
+ assert FakeLexer.analyse(False) == 0.0
+
+
+def test_analysator_raises_exception():
+ # If an analysator wrapped by make_analysator raises an exception,
+ # then the wrapper will return 0.0.
+ class ErrorLexer:
+ def analyse(text):
+ raise RuntimeError('something bad happened')
+ analyse = util.make_analysator(analyse)
+ assert ErrorLexer.analyse('') == 0.0
+
+
+def test_analysator_value_error():
+ # When converting the analysator's return value to a float a
+ # ValueError may occur. If that happens 0.0 is returned instead.
+ assert FakeLexer.analyse('bad input') == 0.0
+
+
+def test_analysator_type_error():
+ # When converting the analysator's return value to a float a
+ # TypeError may occur. If that happens 0.0 is returned instead.
+ assert FakeLexer.analyse('xxx') == 0.0
+
+
+def test_shebang_matches():
+ assert util.shebang_matches('#!/usr/bin/env python\n', r'python(2\.\d)?')
+ assert util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
+ assert util.shebang_matches('#!/usr/bin/startsomethingwith python',
+ r'python(2\.\d)?')
+ assert util.shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+
+ assert not util.shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
+ assert not util.shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
+ assert not util.shebang_matches('#!', r'python')
+
+
+def test_doctype_matches():
+ assert util.doctype_matches('<!DOCTYPE html> <html>', 'html.*')
+ assert not util.doctype_matches(
+ '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*')
+ assert util.html_doctype_matches(
+ '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">')
+
+
+def test_xml():
+ assert util.looks_like_xml(
+ '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">')
+ assert util.looks_like_xml('<html xmlns>abc</html>')
+ assert not util.looks_like_xml('<html>')
+
+
+def test_format_lines():
+ lst = ['cat', 'dog']
+ output = util.format_lines('var', lst)
+ d = {}
+ exec(output, d)
+ assert isinstance(d['var'], tuple)
+ assert ('cat', 'dog') == d['var']
+
+
+def test_duplicates_removed_seq_types():
+ # tuple
+ x = util.duplicates_removed(('a', 'a', 'b'))
+ assert ['a', 'b'] == x
+ # list
+ x = util.duplicates_removed(['a', 'a', 'b'])
+ assert ['a', 'b'] == x
+ # iterator
+ x = util.duplicates_removed(iter(('a', 'a', 'b')))
+ assert ['a', 'b'] == x
+
+
+def test_duplicates_removed_nonconsecutive():
+ # keeps first
+ x = util.duplicates_removed(('a', 'b', 'a'))
+ assert ['a', 'b'] == x
+
+
+def test_guess_decode():
+ # UTF-8 should be decoded as UTF-8
+ s = util.guess_decode('\xff'.encode())
+ assert s == ('\xff', 'utf-8')
+
+ # otherwise, it could be latin1 or the locale encoding...
+ import locale
+ s = util.guess_decode(b'\xff')
+ assert s[1] in ('latin1', locale.getpreferredencoding())
+
+
+def test_guess_decode_from_terminal():
+ class Term:
+ encoding = 'utf-7'
+
+ s = util.guess_decode_from_terminal('\xff'.encode('utf-7'), Term)
+ assert s == ('\xff', 'utf-7')
+
+ s = util.guess_decode_from_terminal('\xff'.encode(), Term)
+ assert s == ('\xff', 'utf-8')
+
+
+def test_console_ansiformat():
+ f = console.ansiformat
+ c = console.codes
+ all_attrs = f('+*_blue_*+', 'text')
+ assert c['blue'] in all_attrs and c['blink'] in all_attrs
+ assert c['bold'] in all_attrs and c['underline'] in all_attrs
+ assert c['reset'] in all_attrs
+ assert raises(KeyError, f, '*mauve*', 'text')
+
+
+def test_console_functions():
+ assert console.reset_color() == console.codes['reset']
+ assert console.colorize('blue', 'text') == \
+ console.codes['blue'] + 'text' + console.codes['reset']
diff --git a/tests/test_words.py b/tests/test_words.py
new file mode 100644
index 0000000..9a8730a
--- /dev/null
+++ b/tests/test_words.py
@@ -0,0 +1,366 @@
+"""
+ Pygments tests for words()
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Token
+
+
+class MyLexer(RegexLexer):
+ tokens = {
+ "root": [
+ (
+ words(
+ [
+ "a-word",
+ "another-word",
+ # Test proper escaping of a few things that can occur
+ # in regular expressions. They are all matched literally.
+ "[",
+ "]",
+ "^",
+ "\\",
+ "(",
+ ")",
+ "(?:",
+ "-",
+ "|",
+ r"\w",
+ ]
+ ),
+ Token.Name,
+ ),
+ (words(["space-allowed-before-this"], prefix=" ?"), Token.Name),
+ (words(["space-allowed-after-this"], suffix=" ?"), Token.Name),
+ (
+ words(["space-required-before-and-after-this"], prefix=" ", suffix=" "),
+ Token.Name,
+ ),
+ # prefix and suffix can be regexes.
+ (words(["one-whitespace-allowed-before-this"], prefix=r"\s?"), Token.Name),
+ (words(["all-whitespace-allowed-after-this"], suffix=r"\s*"), Token.Name),
+ (
+ words(
+ ["all-whitespace-allowed-one-required-after-this"], suffix=r"\s+"
+ ),
+ Token.Name,
+ ),
+ (r"\n", Token.Text),
+ ],
+ }
+
+
+def test_basic():
+ s = "a-word this-is-not-in-the-list another-word"
+ assert list(MyLexer().get_tokens(s)) == [
+ (Token.Name, "a-word"),
+ (Token.Error, " "),
+ (Token.Error, "t"),
+ (Token.Error, "h"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Name, "-"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Name, "-"),
+ (Token.Error, "n"),
+ (Token.Error, "o"),
+ (Token.Error, "t"),
+ (Token.Name, "-"),
+ (Token.Error, "i"),
+ (Token.Error, "n"),
+ (Token.Name, "-"),
+ (Token.Error, "t"),
+ (Token.Error, "h"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "l"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Error, "t"),
+ (Token.Error, " "),
+ (Token.Name, "another-word"),
+ (Token.Text, "\n"),
+ ]
+
+
+def test_special_characters():
+ s = """
+[
+]
+^
+\\
+(
+)
+(?:
+-
+|
+\\w
+"""
+ assert list(MyLexer().get_tokens(s)) == [
+ (Token.Name, "["),
+ (Token.Text, "\n"),
+ (Token.Name, "]"),
+ (Token.Text, "\n"),
+ (Token.Name, "^"),
+ (Token.Text, "\n"),
+ (Token.Name, "\\"),
+ (Token.Text, "\n"),
+ (Token.Name, "("),
+ (Token.Text, "\n"),
+ (Token.Name, ")"),
+ (Token.Text, "\n"),
+ (Token.Name, "(?:"),
+ (Token.Text, "\n"),
+ (Token.Name, "-"),
+ (Token.Text, "\n"),
+ (Token.Name, "|"),
+ (Token.Text, "\n"),
+ (Token.Name, "\\w"),
+ (Token.Text, "\n"),
+ ]
+
+
+def test_affixes():
+ s = """
+space-allowed-after-this |
+space-allowed-before-this
+space-allowed-after-this
+ space-required-before-and-after-this |
+space-required-before-and-after-this |
+ space-required-before-and-after-this<= no space after
+"""
+ assert list(MyLexer().get_tokens(s)) == [
+ (Token.Name, "space-allowed-after-this "),
+ (Token.Name, "|"),
+ (Token.Text, "\n"),
+ (Token.Name, "space-allowed-before-this"),
+ (Token.Text, "\n"),
+ (Token.Name, "space-allowed-after-this"),
+ (Token.Text, "\n"),
+ (Token.Name, " space-required-before-and-after-this "),
+ (Token.Name, "|"),
+ (Token.Text, "\n"),
+ (Token.Error, "s"),
+ (Token.Error, "p"),
+ (Token.Error, "a"),
+ (Token.Error, "c"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "q"),
+ (Token.Error, "u"),
+ (Token.Error, "i"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "b"),
+ (Token.Error, "e"),
+ (Token.Error, "f"),
+ (Token.Error, "o"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "n"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "f"),
+ (Token.Error, "t"),
+ (Token.Error, "e"),
+ (Token.Error, "r"),
+ (Token.Name, "-"),
+ (Token.Error, "t"),
+ (Token.Error, "h"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Error, " "),
+ (Token.Name, "|"),
+ (Token.Text, "\n"),
+ (Token.Error, " "),
+ (Token.Error, "s"),
+ (Token.Error, "p"),
+ (Token.Error, "a"),
+ (Token.Error, "c"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "q"),
+ (Token.Error, "u"),
+ (Token.Error, "i"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "b"),
+ (Token.Error, "e"),
+ (Token.Error, "f"),
+ (Token.Error, "o"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "n"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "f"),
+ (Token.Error, "t"),
+ (Token.Error, "e"),
+ (Token.Error, "r"),
+ (Token.Name, "-"),
+ (Token.Error, "t"),
+ (Token.Error, "h"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Error, "<"),
+ (Token.Error, "="),
+ (Token.Error, " "),
+ (Token.Error, "n"),
+ (Token.Error, "o"),
+ (Token.Error, " "),
+ (Token.Error, "s"),
+ (Token.Error, "p"),
+ (Token.Error, "a"),
+ (Token.Error, "c"),
+ (Token.Error, "e"),
+ (Token.Error, " "),
+ (Token.Error, "a"),
+ (Token.Error, "f"),
+ (Token.Error, "t"),
+ (Token.Error, "e"),
+ (Token.Error, "r"),
+ (Token.Text, "\n"),
+ ]
+
+
+def test_affixes_regexes():
+ s = """
+ one-whitespace-allowed-before-this
+NOT-WHITESPACEone-whitespace-allowed-before-this
+all-whitespace-allowed-after-this \n \t
+all-whitespace-allowed-after-thisNOT-WHITESPACE
+all-whitespace-allowed-one-required-after-thisNOT-WHITESPACE"""
+ assert list(MyLexer().get_tokens(s)) == [
+ (Token.Name, " one-whitespace-allowed-before-this"),
+ (Token.Text, "\n"),
+ (Token.Error, "N"),
+ (Token.Error, "O"),
+ (Token.Error, "T"),
+ (Token.Name, "-"),
+ (Token.Error, "W"),
+ (Token.Error, "H"),
+ (Token.Error, "I"),
+ (Token.Error, "T"),
+ (Token.Error, "E"),
+ (Token.Error, "S"),
+ (Token.Error, "P"),
+ (Token.Error, "A"),
+ (Token.Error, "C"),
+ (Token.Error, "E"),
+ (Token.Name, "one-whitespace-allowed-before-this"),
+ (Token.Text, "\n"),
+ (Token.Name, "all-whitespace-allowed-after-this \n \t\n"),
+ (Token.Name, "all-whitespace-allowed-after-this"),
+ (Token.Error, "N"),
+ (Token.Error, "O"),
+ (Token.Error, "T"),
+ (Token.Name, "-"),
+ (Token.Error, "W"),
+ (Token.Error, "H"),
+ (Token.Error, "I"),
+ (Token.Error, "T"),
+ (Token.Error, "E"),
+ (Token.Error, "S"),
+ (Token.Error, "P"),
+ (Token.Error, "A"),
+ (Token.Error, "C"),
+ (Token.Error, "E"),
+ (Token.Text, "\n"),
+ (Token.Error, "a"),
+ (Token.Error, "l"),
+ (Token.Error, "l"),
+ (Token.Name, "-"),
+ (Token.Error, "w"),
+ (Token.Error, "h"),
+ (Token.Error, "i"),
+ (Token.Error, "t"),
+ (Token.Error, "e"),
+ (Token.Error, "s"),
+ (Token.Error, "p"),
+ (Token.Error, "a"),
+ (Token.Error, "c"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "l"),
+ (Token.Error, "l"),
+ (Token.Error, "o"),
+ (Token.Error, "w"),
+ (Token.Error, "e"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "o"),
+ (Token.Error, "n"),
+ (Token.Error, "e"),
+ (Token.Name, "-"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "q"),
+ (Token.Error, "u"),
+ (Token.Error, "i"),
+ (Token.Error, "r"),
+ (Token.Error, "e"),
+ (Token.Error, "d"),
+ (Token.Name, "-"),
+ (Token.Error, "a"),
+ (Token.Error, "f"),
+ (Token.Error, "t"),
+ (Token.Error, "e"),
+ (Token.Error, "r"),
+ (Token.Name, "-"),
+ (Token.Error, "t"),
+ (Token.Error, "h"),
+ (Token.Error, "i"),
+ (Token.Error, "s"),
+ (Token.Error, "N"),
+ (Token.Error, "O"),
+ (Token.Error, "T"),
+ (Token.Name, "-"),
+ (Token.Error, "W"),
+ (Token.Error, "H"),
+ (Token.Error, "I"),
+ (Token.Error, "T"),
+ (Token.Error, "E"),
+ (Token.Error, "S"),
+ (Token.Error, "P"),
+ (Token.Error, "A"),
+ (Token.Error, "C"),
+ (Token.Error, "E"),
+ (Token.Text, "\n"),
+ ]
+
+
+class MySecondLexer(RegexLexer):
+ tokens = {
+ "root": [
+ (words(["[", "x"]), Token.Name),
+ ],
+ }
+
+
+def test_bracket_escape():
+ s = "whatever"
+ # This used to emit a FutureWarning.
+ assert list(MySecondLexer().get_tokens("x")) == [
+ (Token.Name, "x"),
+ (Token.Text.Whitespace, "\n"),
+ ]
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..dc0a5be
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,15 @@
+[tox]
+envlist = py{36, 37, 38, 39, 310}, lint
+
+[testenv]
+deps =
+ pytest
+ pytest-cov
+ wcag-contrast-ratio
+commands = pytest {posargs}
+
+
+[testenv:lint]
+deps =
+ git+https://github.com/pygments/regexlint.git@master
+commands = regexlint pygments.lexers